/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

/* Used when mapping IRQ vectors in a driver centric manner */
uint16_t *lpfc_used_cpu;
uint32_t lpfc_present_cpu;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}


	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero word count when finished, or we
		 * got a mailbox error; either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's configure-asynchronous-event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set the internal async event support flag to 1; otherwise, it will
 * set the internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command for getting
 * wake up parameters. When this command completes, the response contains the
 * Option ROM version of the HBA. This function translates the version number
 * into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}
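
/*
 * Worked example of the decode above (values illustrative): a prog_id
 * word carrying ver = 5, rev = 0, lev = 2, dist = 1, num = 3 is
 * rendered as "5.02a3" ('a' being dist_char[1]); with dist = 3 and
 * num = 0 the short form "5.02" is produced instead.
 */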

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *                         cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.  */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
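	/*
	 * Illustration of the conversion below: each of the 6 IEEE bytes
	 * yields two characters, nibbles 0-9 mapping to '0'-'9' (0x30 + j)
	 * and nibbles 10-15 to 'a'-'f' (0x61 + j - 10); e.g. a byte of
	 * 0x4F emits '4' then 'f', so the serial number ends up as the
	 * 12-character lowercase hex form of the WWNN's IEEE field.
	 */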
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	i = (mb->un.varRdConfig.max_xri + 1);
	if (phba->cfg_hba_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, i);
		phba->cfg_hba_queue_depth = i;
	}

	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
	i = (mb->un.varRdConfig.max_xri >> 3);
	if (phba->pport->cfg_lun_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3360 LUN queue depth changed from %d to %d\n",
				phba->pport->cfg_lun_queue_depth, i);
		phba->pport->cfg_lun_queue_depth = i;
	}
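	/*
	 * Worked example for the two clamps above (assumed value): if
	 * READ_CONFIG reports max_xri = 511, cfg_hba_queue_depth is capped
	 * at 512 (max_xri + 1) and cfg_lun_queue_depth at 63 (max_xri >> 3).
	 */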
	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if in
	 * MSI-X mode.
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
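	/*
	 * Example timing (assuming the usual R_A_TOV of 10 seconds): the
	 * ELS ring timer above fires after 2 * fc_ratov = 20 seconds,
	 * while the heart-beat and ERATT poll timers run on the fixed
	 * LPFC_HB_MBOX_INTERVAL and LPFC_ERATT_POLL_INTERVAL periods.
	 */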
	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
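	/*
	 * For instance, cfg_link_speed = LPFC_USER_LINK_SPEED_16G on a
	 * board whose link-speed mask (lmt) lacks LMT_16Gb trips the
	 * check above, and after logging message 1302 the request falls
	 * back to LPFC_USER_LINK_SPEED_AUTO before INIT_LINK is issued.
	 */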
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}
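
/*
 * Dispatch note: lpfc_hba_down_post above goes through one of the
 * driver's SLI3/SLI4 API jump-table pointers, so it ends up in
 * lpfc_hba_down_post_s3() or lpfc_hba_down_post_s4() depending on the
 * SLI revision; the pointer is expected to be filled in by the API
 * table setup done elsewhere during driver resource setup (an
 * assumption about code outside this excerpt).
 */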

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}
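
/*
 * Heart-beat timeline (illustrative, using the intervals quoted above):
 * at t=0 a heart-beat mailbox is issued, hb_outstanding is set and the
 * timer is armed for t+30s (LPFC_HB_MBOX_TIMEOUT). A completion inside
 * that window clears hb_outstanding and re-arms the timer for 5s
 * (LPFC_HB_MBOX_INTERVAL); if the 30s timer fires first, the handler
 * below treats the mailbox as timed out and the HBA is taken offline.
 */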

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be taken
 * offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time +
			msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
			jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						 jiffies +
						 msecs_to_jiffies(1000 *
						 LPFC_HB_MBOX_INTERVAL));
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
					retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						msecs_to_jiffies(1000 *
						LPFC_HB_MBOX_INTERVAL));
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					"updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				 jiffies +
				 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still "
					"outstanding: last compl time was "
					"%d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}
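
/*
 * The two offline paths above share a shape but differ by SLI rev: the
 * SLI3 variant issues lpfc_reset_barrier() and lpfc_sli_brdreset()
 * under hbalock and then waits for HS_MBRDY, while the SLI4 variant
 * uses lpfc_sli4_brdreset() followed by lpfc_sli4_post_status_check().
 * Both leave the port in LPFC_HBA_ERROR state with management I/O
 * unblocked.
 */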

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/*
	 * Firmware stops when it has triggered erratt. That could cause
	 * the I/Os to be dropped by the firmware. Error out the iocbs
	 * (I/Os) on the txcmplq and let the SCSI layer retry them after
	 * re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * the first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}
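
/*
 * lpfc_board_errevt_to_mgmt() posts an FC_REG_BOARD_EVENT vendor event;
 * fc_host_post_vendor_event() hands it to the FC transport's netlink
 * event channel, so a management application subscribed to vendor ID
 * LPFC_NL_VENDOR_ID can react to board errors asynchronously (a note
 * on the expected consumer, not behavior enforced in this file).
 */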

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it has triggered erratt with HS_FFER6.
		 * That could cause the I/Os to be dropped by the firmware.
		 * Error out the iocbs (I/Os) on the txcmplq and let the SCSI
		 * layer retry them after re-establishing link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: enable the "Reset Needed" log message.
 *
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. It waits for port
 * status register (ERR, RDY, RN) bits before proceeding with function reset.
 * During this process, interrupt vectors are freed and later requested
 * for handling possible port resource change.
 **/
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;

	/*
	 * On error status condition, the driver needs to wait for the port
	 * to be ready before performing reset.
	 */
	rc = lpfc_sli4_pdev_status_reg_wait(phba);
	if (!rc) {
		/* need reset: attempt for port recovery */
		if (en_rn_msg)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2887 Reset Needed: Attempting Port "
					"Recovery...\n");
		lpfc_offline_prep(phba, mbx_action);
		lpfc_offline(phba);
		/* release interrupt for possible resource change */
		lpfc_sli4_disable_intr(phba);
		lpfc_sli_brdrestart(phba);
		/* request and enable interrupt */
		intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3175 Failed to enable interrupt\n");
			return -EIO;
		} else {
			phba->intr_mode = intr_mode;
		}
		rc = lpfc_online(phba);
		if (rc == 0)
			lpfc_unblock_mgmt_io(phba);
	}
	return rc;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t pci_rd_rc1, pci_rd_rc2;
	bool en_rn_msg = true;
	int rc;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			/* TODO: Register for Overtemp async events. */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2889 Port Overtemperature event, "
				"taking port offline\n");
			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			break;
		}
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3143 Port Down: Firmware Update "
					"Detected\n");
			en_rn_msg = false;
		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3145 Port Down: Provisioning\n");

		/* Check port status register for function reset */
		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
				en_rn_msg);
		if (rc == 0) {
			/* don't report event on forced debug dump */
			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
				return;
			else
				break;
		}
		/* fall through for not able to recover */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3152 Unrecoverable error, bring the port "
				"offline\n");
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}
1744 * 1745 * Return codes 1746 * 0 - pointer to the VPD passed in is NULL 1747 * 1 - success 1748 **/ 1749 int 1750 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 1751 { 1752 uint8_t lenlo, lenhi; 1753 int Length; 1754 int i, j; 1755 int finished = 0; 1756 int index = 0; 1757 1758 if (!vpd) 1759 return 0; 1760 1761 /* Vital Product */ 1762 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1763 "0455 Vital Product Data: x%x x%x x%x x%x\n", 1764 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 1765 (uint32_t) vpd[3]); 1766 while (!finished && (index < (len - 4))) { 1767 switch (vpd[index]) { 1768 case 0x82: 1769 case 0x91: 1770 index += 1; 1771 lenlo = vpd[index]; 1772 index += 1; 1773 lenhi = vpd[index]; 1774 index += 1; 1775 i = ((((unsigned short)lenhi) << 8) + lenlo); 1776 index += i; 1777 break; 1778 case 0x90: 1779 index += 1; 1780 lenlo = vpd[index]; 1781 index += 1; 1782 lenhi = vpd[index]; 1783 index += 1; 1784 Length = ((((unsigned short)lenhi) << 8) + lenlo); 1785 if (Length > len - index) 1786 Length = len - index; 1787 while (Length > 0) { 1788 /* Look for Serial Number */ 1789 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 1790 index += 2; 1791 i = vpd[index]; 1792 index += 1; 1793 j = 0; 1794 Length -= (3+i); 1795 while(i--) { 1796 phba->SerialNumber[j++] = vpd[index++]; 1797 if (j == 31) 1798 break; 1799 } 1800 phba->SerialNumber[j] = 0; 1801 continue; 1802 } 1803 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 1804 phba->vpd_flag |= VPD_MODEL_DESC; 1805 index += 2; 1806 i = vpd[index]; 1807 index += 1; 1808 j = 0; 1809 Length -= (3+i); 1810 while(i--) { 1811 phba->ModelDesc[j++] = vpd[index++]; 1812 if (j == 255) 1813 break; 1814 } 1815 phba->ModelDesc[j] = 0; 1816 continue; 1817 } 1818 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 1819 phba->vpd_flag |= VPD_MODEL_NAME; 1820 index += 2; 1821 i = vpd[index]; 1822 index += 1; 1823 j = 0; 1824 Length -= (3+i); 1825 while(i--) { 1826 phba->ModelName[j++] = vpd[index++]; 1827 if (j == 79) 1828 break; 1829 } 1830 phba->ModelName[j] = 0; 1831 continue; 1832 } 1833 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 1834 phba->vpd_flag |= VPD_PROGRAM_TYPE; 1835 index += 2; 1836 i = vpd[index]; 1837 index += 1; 1838 j = 0; 1839 Length -= (3+i); 1840 while(i--) { 1841 phba->ProgramType[j++] = vpd[index++]; 1842 if (j == 255) 1843 break; 1844 } 1845 phba->ProgramType[j] = 0; 1846 continue; 1847 } 1848 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 1849 phba->vpd_flag |= VPD_PORT; 1850 index += 2; 1851 i = vpd[index]; 1852 index += 1; 1853 j = 0; 1854 Length -= (3+i); 1855 while(i--) { 1856 if ((phba->sli_rev == LPFC_SLI_REV4) && 1857 (phba->sli4_hba.pport_name_sta == 1858 LPFC_SLI4_PPNAME_GET)) { 1859 j++; 1860 index++; 1861 } else 1862 phba->Port[j++] = vpd[index++]; 1863 if (j == 19) 1864 break; 1865 } 1866 if ((phba->sli_rev != LPFC_SLI_REV4) || 1867 (phba->sli4_hba.pport_name_sta == 1868 LPFC_SLI4_PPNAME_NON)) 1869 phba->Port[j] = 0; 1870 continue; 1871 } 1872 else { 1873 index += 2; 1874 i = vpd[index]; 1875 index += 1; 1876 index += i; 1877 Length -= (3 + i); 1878 } 1879 } 1880 finished = 0; 1881 break; 1882 case 0x78: 1883 finished = 1; 1884 break; 1885 default: 1886 index ++; 1887 break; 1888 } 1889 } 1890 1891 return(1); 1892 } 1893 1894 /** 1895 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 1896 * @phba: pointer to lpfc hba data structure. 1897 * @mdp: pointer to the data structure to hold the derived model name. 
1898 * @descp: pointer to the data structure to hold the derived description. 1899 * 1900 * This routine retrieves HBA's description based on its registered PCI device 1901 * ID. The @descp passed into this function points to an array of 256 chars. It 1902 * shall be returned with the model name, maximum speed, and the host bus type. 1903 * The @mdp passed into this function points to an array of 80 chars. When the 1904 * function returns, the @mdp will be filled with the model name. 1905 **/ 1906 static void 1907 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 1908 { 1909 lpfc_vpd_t *vp; 1910 uint16_t dev_id = phba->pcidev->device; 1911 int max_speed; 1912 int GE = 0; 1913 int oneConnect = 0; /* default is not a oneConnect */ 1914 struct { 1915 char *name; 1916 char *bus; 1917 char *function; 1918 } m = {"<Unknown>", "", ""}; 1919 1920 if (mdp && mdp[0] != '\0' 1921 && descp && descp[0] != '\0') 1922 return; 1923 1924 if (phba->lmt & LMT_16Gb) 1925 max_speed = 16; 1926 else if (phba->lmt & LMT_10Gb) 1927 max_speed = 10; 1928 else if (phba->lmt & LMT_8Gb) 1929 max_speed = 8; 1930 else if (phba->lmt & LMT_4Gb) 1931 max_speed = 4; 1932 else if (phba->lmt & LMT_2Gb) 1933 max_speed = 2; 1934 else if (phba->lmt & LMT_1Gb) 1935 max_speed = 1; 1936 else 1937 max_speed = 0; 1938 1939 vp = &phba->vpd; 1940 1941 switch (dev_id) { 1942 case PCI_DEVICE_ID_FIREFLY: 1943 m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"}; 1944 break; 1945 case PCI_DEVICE_ID_SUPERFLY: 1946 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 1947 m = (typeof(m)){"LP7000", "PCI", 1948 "Fibre Channel Adapter"}; 1949 else 1950 m = (typeof(m)){"LP7000E", "PCI", 1951 "Fibre Channel Adapter"}; 1952 break; 1953 case PCI_DEVICE_ID_DRAGONFLY: 1954 m = (typeof(m)){"LP8000", "PCI", 1955 "Fibre Channel Adapter"}; 1956 break; 1957 case PCI_DEVICE_ID_CENTAUR: 1958 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 1959 m = (typeof(m)){"LP9002", "PCI", 1960 "Fibre Channel Adapter"}; 1961 else 1962 m = (typeof(m)){"LP9000", "PCI", 1963 "Fibre Channel Adapter"}; 1964 break; 1965 case PCI_DEVICE_ID_RFLY: 1966 m = (typeof(m)){"LP952", "PCI", 1967 "Fibre Channel Adapter"}; 1968 break; 1969 case PCI_DEVICE_ID_PEGASUS: 1970 m = (typeof(m)){"LP9802", "PCI-X", 1971 "Fibre Channel Adapter"}; 1972 break; 1973 case PCI_DEVICE_ID_THOR: 1974 m = (typeof(m)){"LP10000", "PCI-X", 1975 "Fibre Channel Adapter"}; 1976 break; 1977 case PCI_DEVICE_ID_VIPER: 1978 m = (typeof(m)){"LPX1000", "PCI-X", 1979 "Fibre Channel Adapter"}; 1980 break; 1981 case PCI_DEVICE_ID_PFLY: 1982 m = (typeof(m)){"LP982", "PCI-X", 1983 "Fibre Channel Adapter"}; 1984 break; 1985 case PCI_DEVICE_ID_TFLY: 1986 m = (typeof(m)){"LP1050", "PCI-X", 1987 "Fibre Channel Adapter"}; 1988 break; 1989 case PCI_DEVICE_ID_HELIOS: 1990 m = (typeof(m)){"LP11000", "PCI-X2", 1991 "Fibre Channel Adapter"}; 1992 break; 1993 case PCI_DEVICE_ID_HELIOS_SCSP: 1994 m = (typeof(m)){"LP11000-SP", "PCI-X2", 1995 "Fibre Channel Adapter"}; 1996 break; 1997 case PCI_DEVICE_ID_HELIOS_DCSP: 1998 m = (typeof(m)){"LP11002-SP", "PCI-X2", 1999 "Fibre Channel Adapter"}; 2000 break; 2001 case PCI_DEVICE_ID_NEPTUNE: 2002 m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"}; 2003 break; 2004 case PCI_DEVICE_ID_NEPTUNE_SCSP: 2005 m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"}; 2006 break; 2007 case PCI_DEVICE_ID_NEPTUNE_DCSP: 2008 m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"}; 2009 break; 2010 case PCI_DEVICE_ID_BMID: 2011 m = (typeof(m)){"LP1150", 
"PCI-X2", "Fibre Channel Adapter"}; 2012 break; 2013 case PCI_DEVICE_ID_BSMB: 2014 m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"}; 2015 break; 2016 case PCI_DEVICE_ID_ZEPHYR: 2017 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2018 break; 2019 case PCI_DEVICE_ID_ZEPHYR_SCSP: 2020 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2021 break; 2022 case PCI_DEVICE_ID_ZEPHYR_DCSP: 2023 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 2024 GE = 1; 2025 break; 2026 case PCI_DEVICE_ID_ZMID: 2027 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 2028 break; 2029 case PCI_DEVICE_ID_ZSMB: 2030 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 2031 break; 2032 case PCI_DEVICE_ID_LP101: 2033 m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"}; 2034 break; 2035 case PCI_DEVICE_ID_LP10000S: 2036 m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"}; 2037 break; 2038 case PCI_DEVICE_ID_LP11000S: 2039 m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"}; 2040 break; 2041 case PCI_DEVICE_ID_LPE11000S: 2042 m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"}; 2043 break; 2044 case PCI_DEVICE_ID_SAT: 2045 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 2046 break; 2047 case PCI_DEVICE_ID_SAT_MID: 2048 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 2049 break; 2050 case PCI_DEVICE_ID_SAT_SMB: 2051 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 2052 break; 2053 case PCI_DEVICE_ID_SAT_DCSP: 2054 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 2055 break; 2056 case PCI_DEVICE_ID_SAT_SCSP: 2057 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 2058 break; 2059 case PCI_DEVICE_ID_SAT_S: 2060 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 2061 break; 2062 case PCI_DEVICE_ID_HORNET: 2063 m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"}; 2064 GE = 1; 2065 break; 2066 case PCI_DEVICE_ID_PROTEUS_VF: 2067 m = (typeof(m)){"LPev12000", "PCIe IOV", 2068 "Fibre Channel Adapter"}; 2069 break; 2070 case PCI_DEVICE_ID_PROTEUS_PF: 2071 m = (typeof(m)){"LPev12000", "PCIe IOV", 2072 "Fibre Channel Adapter"}; 2073 break; 2074 case PCI_DEVICE_ID_PROTEUS_S: 2075 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 2076 "Fibre Channel Adapter"}; 2077 break; 2078 case PCI_DEVICE_ID_TIGERSHARK: 2079 oneConnect = 1; 2080 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 2081 break; 2082 case PCI_DEVICE_ID_TOMCAT: 2083 oneConnect = 1; 2084 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 2085 break; 2086 case PCI_DEVICE_ID_FALCON: 2087 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 2088 "EmulexSecure Fibre"}; 2089 break; 2090 case PCI_DEVICE_ID_BALIUS: 2091 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 2092 "Fibre Channel Adapter"}; 2093 break; 2094 case PCI_DEVICE_ID_LANCER_FC: 2095 case PCI_DEVICE_ID_LANCER_FC_VF: 2096 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; 2097 break; 2098 case PCI_DEVICE_ID_LANCER_FCOE: 2099 case PCI_DEVICE_ID_LANCER_FCOE_VF: 2100 oneConnect = 1; 2101 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2102 break; 2103 case PCI_DEVICE_ID_SKYHAWK: 2104 case PCI_DEVICE_ID_SKYHAWK_VF: 2105 oneConnect = 1; 2106 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"}; 2107 break; 2108 default: 2109 m = (typeof(m)){"Unknown", "", ""}; 2110 break; 2111 } 2112 2113 if (mdp && mdp[0] == '\0') 2114 snprintf(mdp, 79,"%s", m.name); 2115 /* 2116 * oneConnect hba requires special processing, they are all initiators 2117 * and we put the port number on the end 
2118 */ 2119 if (descp && descp[0] == '\0') { 2120 if (oneConnect) 2121 snprintf(descp, 255, 2122 "Emulex OneConnect %s, %s Initiator %s", 2123 m.name, m.function, 2124 phba->Port); 2125 else if (max_speed == 0) 2126 snprintf(descp, 255, 2127 "Emulex %s %s %s ", 2128 m.name, m.bus, m.function); 2129 else 2130 snprintf(descp, 255, 2131 "Emulex %s %d%s %s %s", 2132 m.name, max_speed, (GE) ? "GE" : "Gb", 2133 m.bus, m.function); 2134 } 2135 } 2136 2137 /** 2138 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 2139 * @phba: pointer to lpfc hba data structure. 2140 * @pring: pointer to a IOCB ring. 2141 * @cnt: the number of IOCBs to be posted to the IOCB ring. 2142 * 2143 * This routine posts a given number of IOCBs with the associated DMA buffer 2144 * descriptors specified by the cnt argument to the given IOCB ring. 2145 * 2146 * Return codes 2147 * The number of IOCBs NOT able to be posted to the IOCB ring. 2148 **/ 2149 int 2150 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 2151 { 2152 IOCB_t *icmd; 2153 struct lpfc_iocbq *iocb; 2154 struct lpfc_dmabuf *mp1, *mp2; 2155 2156 cnt += pring->missbufcnt; 2157 2158 /* While there are buffers to post */ 2159 while (cnt > 0) { 2160 /* Allocate buffer for command iocb */ 2161 iocb = lpfc_sli_get_iocbq(phba); 2162 if (iocb == NULL) { 2163 pring->missbufcnt = cnt; 2164 return cnt; 2165 } 2166 icmd = &iocb->iocb; 2167 2168 /* 2 buffers can be posted per command */ 2169 /* Allocate buffer to post */ 2170 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2171 if (mp1) 2172 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 2173 if (!mp1 || !mp1->virt) { 2174 kfree(mp1); 2175 lpfc_sli_release_iocbq(phba, iocb); 2176 pring->missbufcnt = cnt; 2177 return cnt; 2178 } 2179 2180 INIT_LIST_HEAD(&mp1->list); 2181 /* Allocate buffer to post */ 2182 if (cnt > 1) { 2183 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2184 if (mp2) 2185 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2186 &mp2->phys); 2187 if (!mp2 || !mp2->virt) { 2188 kfree(mp2); 2189 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2190 kfree(mp1); 2191 lpfc_sli_release_iocbq(phba, iocb); 2192 pring->missbufcnt = cnt; 2193 return cnt; 2194 } 2195 2196 INIT_LIST_HEAD(&mp2->list); 2197 } else { 2198 mp2 = NULL; 2199 } 2200 2201 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2202 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2203 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2204 icmd->ulpBdeCount = 1; 2205 cnt--; 2206 if (mp2) { 2207 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2208 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2209 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2210 cnt--; 2211 icmd->ulpBdeCount = 2; 2212 } 2213 2214 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2215 icmd->ulpLe = 1; 2216 2217 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2218 IOCB_ERROR) { 2219 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2220 kfree(mp1); 2221 cnt++; 2222 if (mp2) { 2223 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2224 kfree(mp2); 2225 cnt++; 2226 } 2227 lpfc_sli_release_iocbq(phba, iocb); 2228 pring->missbufcnt = cnt; 2229 return cnt; 2230 } 2231 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2232 if (mp2) 2233 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2234 } 2235 pring->missbufcnt = 0; 2236 return 0; 2237 } 2238 2239 /** 2240 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2241 * @phba: pointer to lpfc hba data structure. 
2242 *
2243 * This routine posts initial receive IOCB buffers to the ELS ring. The
2244 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2245 * set to 64 IOCBs.
2246 *
2247 * Return codes
2248 * 0 - success (currently always success)
2249 **/
2250 static int
2251 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2252 {
2253 struct lpfc_sli *psli = &phba->sli;
2254
2255 /* Ring 0, ELS / CT buffers */
2256 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2257 /* Ring 2 - FCP no buffers needed */
2258
2259 return 0;
2260 }
2261
2262 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2263
2264 /**
2265 * lpfc_sha_init - Set up initial array of hash table entries
2266 * @HashResultPointer: pointer to an array as hash table.
2267 *
2268 * This routine sets up the initial values to the array of hash table entries
2269 * for the LC HBAs.
2270 **/
2271 static void
2272 lpfc_sha_init(uint32_t * HashResultPointer)
2273 {
2274 HashResultPointer[0] = 0x67452301;
2275 HashResultPointer[1] = 0xEFCDAB89;
2276 HashResultPointer[2] = 0x98BADCFE;
2277 HashResultPointer[3] = 0x10325476;
2278 HashResultPointer[4] = 0xC3D2E1F0;
2279 }
2280
2281 /**
2282 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2283 * @HashResultPointer: pointer to an initial/result hash table.
2284 * @HashWorkingPointer: pointer to a working hash table.
2285 *
2286 * This routine iterates an initial hash table pointed to by @HashResultPointer
2287 * with the values from the working hash table pointed to by @HashWorkingPointer.
2288 * The results are put back into the initial hash table and returned through
2289 * the @HashResultPointer as the result hash table.
2290 **/
2291 static void
2292 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2293 {
2294 int t;
2295 uint32_t TEMP;
2296 uint32_t A, B, C, D, E;
2297 t = 16;
2298 do {
2299 HashWorkingPointer[t] =
2300 S(1,
2301 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2302 8] ^
2303 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2304 } while (++t <= 79);
2305 t = 0;
2306 A = HashResultPointer[0];
2307 B = HashResultPointer[1];
2308 C = HashResultPointer[2];
2309 D = HashResultPointer[3];
2310 E = HashResultPointer[4];
2311
2312 do {
2313 if (t < 20) {
2314 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2315 } else if (t < 40) {
2316 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2317 } else if (t < 60) {
2318 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2319 } else {
2320 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2321 }
2322 TEMP += S(5, A) + E + HashWorkingPointer[t];
2323 E = D;
2324 D = C;
2325 C = S(30, B);
2326 B = A;
2327 A = TEMP;
2328 } while (++t <= 79);
2329
2330 HashResultPointer[0] += A;
2331 HashResultPointer[1] += B;
2332 HashResultPointer[2] += C;
2333 HashResultPointer[3] += D;
2334 HashResultPointer[4] += E;
2335
2336 }
2337
2338 /**
2339 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2340 * @RandomChallenge: pointer to the entry of host challenge random number array.
2341 * @HashWorking: pointer to the entry of the working hash array.
2342 *
2343 * This routine calculates the working hash array referred to by @HashWorking
2344 * from the challenge random numbers associated with the host, referred to by
2345 * @RandomChallenge. The result is put into the entry of the working hash
2346 * array and returned by reference through @HashWorking.
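 *
 * A minimal one-word example (values hypothetical): a challenge word of
 * 0xA5A50000 XORed into a working hash word of 0x00005A5A leaves
 * 0xA5A55A5A in that working hash entry.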
2347 **/ 2348 static void 2349 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 2350 { 2351 *HashWorking = (*RandomChallenge ^ *HashWorking); 2352 } 2353 2354 /** 2355 * lpfc_hba_init - Perform special handling for LC HBA initialization 2356 * @phba: pointer to lpfc hba data structure. 2357 * @hbainit: pointer to an array of unsigned 32-bit integers. 2358 * 2359 * This routine performs the special handling for LC HBA initialization. 2360 **/ 2361 void 2362 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2363 { 2364 int t; 2365 uint32_t *HashWorking; 2366 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2367 2368 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2369 if (!HashWorking) 2370 return; 2371 2372 HashWorking[0] = HashWorking[78] = *pwwnn++; 2373 HashWorking[1] = HashWorking[79] = *pwwnn; 2374 2375 for (t = 0; t < 7; t++) 2376 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2377 2378 lpfc_sha_init(hbainit); 2379 lpfc_sha_iterate(hbainit, HashWorking); 2380 kfree(HashWorking); 2381 } 2382 2383 /** 2384 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2385 * @vport: pointer to a virtual N_Port data structure. 2386 * 2387 * This routine performs the necessary cleanups before deleting the @vport. 2388 * It invokes the discovery state machine to perform necessary state 2389 * transitions and to release the ndlps associated with the @vport. Note, 2390 * the physical port is treated as @vport 0. 2391 **/ 2392 void 2393 lpfc_cleanup(struct lpfc_vport *vport) 2394 { 2395 struct lpfc_hba *phba = vport->phba; 2396 struct lpfc_nodelist *ndlp, *next_ndlp; 2397 int i = 0; 2398 2399 if (phba->link_state > LPFC_LINK_DOWN) 2400 lpfc_port_link_failure(vport); 2401 2402 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2403 if (!NLP_CHK_NODE_ACT(ndlp)) { 2404 ndlp = lpfc_enable_node(vport, ndlp, 2405 NLP_STE_UNUSED_NODE); 2406 if (!ndlp) 2407 continue; 2408 spin_lock_irq(&phba->ndlp_lock); 2409 NLP_SET_FREE_REQ(ndlp); 2410 spin_unlock_irq(&phba->ndlp_lock); 2411 /* Trigger the release of the ndlp memory */ 2412 lpfc_nlp_put(ndlp); 2413 continue; 2414 } 2415 spin_lock_irq(&phba->ndlp_lock); 2416 if (NLP_CHK_FREE_REQ(ndlp)) { 2417 /* The ndlp should not be in memory free mode already */ 2418 spin_unlock_irq(&phba->ndlp_lock); 2419 continue; 2420 } else 2421 /* Indicate request for freeing ndlp memory */ 2422 NLP_SET_FREE_REQ(ndlp); 2423 spin_unlock_irq(&phba->ndlp_lock); 2424 2425 if (vport->port_type != LPFC_PHYSICAL_PORT && 2426 ndlp->nlp_DID == Fabric_DID) { 2427 /* Just free up ndlp with Fabric_DID for vports */ 2428 lpfc_nlp_put(ndlp); 2429 continue; 2430 } 2431 2432 /* take care of nodes in unused state before the state 2433 * machine taking action. 2434 */ 2435 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 2436 lpfc_nlp_put(ndlp); 2437 continue; 2438 } 2439 2440 if (ndlp->nlp_type & NLP_FABRIC) 2441 lpfc_disc_state_machine(vport, ndlp, NULL, 2442 NLP_EVT_DEVICE_RECOVERY); 2443 2444 lpfc_disc_state_machine(vport, ndlp, NULL, 2445 NLP_EVT_DEVICE_RM); 2446 } 2447 2448 /* At this point, ALL ndlp's should be gone 2449 * because of the previous NLP_EVT_DEVICE_RM. 2450 * Lets wait for this to happen, if needed. 
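 * The loop below polls every 10 ms and gives up after roughly 30
 * seconds (3000 iterations), dumping the nodes still on the list
 * before breaking out.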
2451 */
2452 while (!list_empty(&vport->fc_nodes)) {
2453 if (i++ > 3000) {
2454 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2455 "0233 Nodelist not empty\n");
2456 list_for_each_entry_safe(ndlp, next_ndlp,
2457 &vport->fc_nodes, nlp_listp) {
2458 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2459 LOG_NODE,
2460 "0282 did:x%x ndlp:x%p "
2461 "usgmap:x%x refcnt:%d\n",
2462 ndlp->nlp_DID, (void *)ndlp,
2463 ndlp->nlp_usg_map,
2464 atomic_read(
2465 &ndlp->kref.refcount));
2466 }
2467 break;
2468 }
2469
2470 /* Wait for any activity on ndlps to settle */
2471 msleep(10);
2472 }
2473 lpfc_cleanup_vports_rrqs(vport, NULL);
2474 }
2475
2476 /**
2477 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2478 * @vport: pointer to a virtual N_Port data structure.
2479 *
2480 * This routine stops all the timers associated with a @vport. This function
2481 * is invoked before disabling or deleting a @vport. Note that the physical
2482 * port is treated as @vport 0.
2483 **/
2484 void
2485 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2486 {
2487 del_timer_sync(&vport->els_tmofunc);
2488 del_timer_sync(&vport->fc_fdmitmo);
2489 del_timer_sync(&vport->delayed_disc_tmo);
2490 lpfc_can_disctmo(vport);
2491 return;
2492 }
2493
2494 /**
2495 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2496 * @phba: pointer to lpfc hba data structure.
2497 *
2498 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2499 * caller of this routine should already hold the host lock.
2500 **/
2501 void
2502 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2503 {
2504 /* Clear pending FCF rediscovery wait flag */
2505 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2506
2507 /* Now, try to stop the timer */
2508 del_timer(&phba->fcf.redisc_wait);
2509 }
2510
2511 /**
2512 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2513 * @phba: pointer to lpfc hba data structure.
2514 *
2515 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2516 * checks whether the FCF rediscovery wait timer is pending with the host
2517 * lock held before proceeding with disabling the timer and clearing the
2518 * wait timer pending flag.
2519 **/
2520 void
2521 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2522 {
2523 spin_lock_irq(&phba->hbalock);
2524 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2525 /* FCF rediscovery timer already fired or stopped */
2526 spin_unlock_irq(&phba->hbalock);
2527 return;
2528 }
2529 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2530 /* Clear failover in progress flags */
2531 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2532 spin_unlock_irq(&phba->hbalock);
2533 }
2534
2535 /**
2536 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2537 * @phba: pointer to lpfc hba data structure.
2538 *
2539 * This routine stops all the timers associated with an HBA. This function is
2540 * invoked before either putting a HBA offline or unloading the driver.
2541 **/
2542 void
2543 lpfc_stop_hba_timers(struct lpfc_hba *phba)
2544 {
2545 lpfc_stop_vport_timers(phba->pport);
2546 del_timer_sync(&phba->sli.mbox_tmo);
2547 del_timer_sync(&phba->fabric_block_timer);
2548 del_timer_sync(&phba->eratt_poll);
2549 del_timer_sync(&phba->hb_tmofunc);
2550 if (phba->sli_rev == LPFC_SLI_REV4) {
2551 del_timer_sync(&phba->rrq_tmr);
2552 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2553 }
2554 phba->hb_outstanding = 0;
2555
2556 switch (phba->pci_dev_grp) {
2557 case LPFC_PCI_DEV_LP:
2558 /* Stop any LightPulse device specific driver timers */
2559 del_timer_sync(&phba->fcp_poll_timer);
2560 break;
2561 case LPFC_PCI_DEV_OC:
2562 /* Stop any OneConnect device specific driver timers */
2563 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2564 break;
2565 default:
2566 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2567 "0297 Invalid device group (x%x)\n",
2568 phba->pci_dev_grp);
2569 break;
2570 }
2571 return;
2572 }
2573
2574 /**
2575 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2576 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: LPFC_MBX_WAIT to wait for any active mailbox command to
 * complete, or LPFC_MBX_NO_WAIT to return without waiting.
2577 *
2578 * This routine marks a HBA's management interface as blocked. Once the HBA's
2579 * management interface is marked as blocked, all user space access to
2580 * the HBA, whether from the sysfs interface or the libdfc interface, will
2581 * be blocked. The HBA is set to block the management interface when the
2582 * driver prepares the HBA interface for online or offline.
2583 **/
2584 static void
2585 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
2586 {
2587 unsigned long iflag;
2588 uint8_t actcmd = MBX_HEARTBEAT;
2589 unsigned long timeout;
2590
2591 spin_lock_irqsave(&phba->hbalock, iflag);
2592 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2593 spin_unlock_irqrestore(&phba->hbalock, iflag);
2594 if (mbx_action == LPFC_MBX_NO_WAIT)
2595 return;
2596 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
2597 spin_lock_irqsave(&phba->hbalock, iflag);
2598 if (phba->sli.mbox_active) {
2599 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2600 /* Determine how long we might wait for the active mailbox
2601 * command to be gracefully completed by firmware.
2602 */
2603 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
2604 phba->sli.mbox_active) * 1000) + jiffies;
2605 }
2606 spin_unlock_irqrestore(&phba->hbalock, iflag);
2607
2608 /* Wait for the outstanding mailbox command to complete */
2609 while (phba->sli.mbox_active) {
2610 /* Check active mailbox complete status every 2ms */
2611 msleep(2);
2612 if (time_after(jiffies, timeout)) {
2613 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2614 "2813 Mgmt IO is Blocked %x "
2615 "- mbox cmd %x still active\n",
2616 phba->sli.sli_flag, actcmd);
2617 break;
2618 }
2619 }
2620 }
2621
2622 /**
2623 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
2624 * @phba: pointer to lpfc hba data structure.
2625 *
2626 * Allocate RPIs for all active remote nodes. This is needed whenever
2627 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
2628 * is to fix up the temporary rpi assignments.
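 *
 * A sketch of the intended effect (rpi numbers hypothetical): a node
 * that held rpi 5 before the reset may come back with rpi 17 afterwards;
 * only nlp_rpi is refreshed, the node itself is left in place.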
2629 **/ 2630 void 2631 lpfc_sli4_node_prep(struct lpfc_hba *phba) 2632 { 2633 struct lpfc_nodelist *ndlp, *next_ndlp; 2634 struct lpfc_vport **vports; 2635 int i; 2636 2637 if (phba->sli_rev != LPFC_SLI_REV4) 2638 return; 2639 2640 vports = lpfc_create_vport_work_array(phba); 2641 if (vports != NULL) { 2642 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2643 if (vports[i]->load_flag & FC_UNLOADING) 2644 continue; 2645 2646 list_for_each_entry_safe(ndlp, next_ndlp, 2647 &vports[i]->fc_nodes, 2648 nlp_listp) { 2649 if (NLP_CHK_NODE_ACT(ndlp)) 2650 ndlp->nlp_rpi = 2651 lpfc_sli4_alloc_rpi(phba); 2652 } 2653 } 2654 } 2655 lpfc_destroy_vport_work_array(phba, vports); 2656 } 2657 2658 /** 2659 * lpfc_online - Initialize and bring a HBA online 2660 * @phba: pointer to lpfc hba data structure. 2661 * 2662 * This routine initializes the HBA and brings a HBA online. During this 2663 * process, the management interface is blocked to prevent user space access 2664 * to the HBA interfering with the driver initialization. 2665 * 2666 * Return codes 2667 * 0 - successful 2668 * 1 - failed 2669 **/ 2670 int 2671 lpfc_online(struct lpfc_hba *phba) 2672 { 2673 struct lpfc_vport *vport; 2674 struct lpfc_vport **vports; 2675 int i; 2676 bool vpis_cleared = false; 2677 2678 if (!phba) 2679 return 0; 2680 vport = phba->pport; 2681 2682 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 2683 return 0; 2684 2685 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2686 "0458 Bring Adapter online\n"); 2687 2688 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 2689 2690 if (!lpfc_sli_queue_setup(phba)) { 2691 lpfc_unblock_mgmt_io(phba); 2692 return 1; 2693 } 2694 2695 if (phba->sli_rev == LPFC_SLI_REV4) { 2696 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 2697 lpfc_unblock_mgmt_io(phba); 2698 return 1; 2699 } 2700 spin_lock_irq(&phba->hbalock); 2701 if (!phba->sli4_hba.max_cfg_param.vpi_used) 2702 vpis_cleared = true; 2703 spin_unlock_irq(&phba->hbalock); 2704 } else { 2705 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 2706 lpfc_unblock_mgmt_io(phba); 2707 return 1; 2708 } 2709 } 2710 2711 vports = lpfc_create_vport_work_array(phba); 2712 if (vports != NULL) 2713 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2714 struct Scsi_Host *shost; 2715 shost = lpfc_shost_from_vport(vports[i]); 2716 spin_lock_irq(shost->host_lock); 2717 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 2718 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 2719 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2720 if (phba->sli_rev == LPFC_SLI_REV4) { 2721 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 2722 if ((vpis_cleared) && 2723 (vports[i]->port_type != 2724 LPFC_PHYSICAL_PORT)) 2725 vports[i]->vpi = 0; 2726 } 2727 spin_unlock_irq(shost->host_lock); 2728 } 2729 lpfc_destroy_vport_work_array(phba, vports); 2730 2731 lpfc_unblock_mgmt_io(phba); 2732 return 0; 2733 } 2734 2735 /** 2736 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 2737 * @phba: pointer to lpfc hba data structure. 2738 * 2739 * This routine marks a HBA's management interface as not blocked. Once the 2740 * HBA's management interface is marked as not blocked, all the user space 2741 * access to the HBA, whether they are from sysfs interface or libdfc 2742 * interface will be allowed. The HBA is set to block the management interface 2743 * when the driver prepares the HBA interface for online or offline and then 2744 * set to unblock the management interface afterwards. 
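 *
 * A typical caller brackets a configuration change with the two calls,
 * as lpfc_online() above does:
 *
 *	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
 *	... bring the HBA up or down ...
 *	lpfc_unblock_mgmt_io(phba);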
2745 **/ 2746 void 2747 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 2748 { 2749 unsigned long iflag; 2750 2751 spin_lock_irqsave(&phba->hbalock, iflag); 2752 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 2753 spin_unlock_irqrestore(&phba->hbalock, iflag); 2754 } 2755 2756 /** 2757 * lpfc_offline_prep - Prepare a HBA to be brought offline 2758 * @phba: pointer to lpfc hba data structure. 2759 * 2760 * This routine is invoked to prepare a HBA to be brought offline. It performs 2761 * unregistration login to all the nodes on all vports and flushes the mailbox 2762 * queue to make it ready to be brought offline. 2763 **/ 2764 void 2765 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) 2766 { 2767 struct lpfc_vport *vport = phba->pport; 2768 struct lpfc_nodelist *ndlp, *next_ndlp; 2769 struct lpfc_vport **vports; 2770 struct Scsi_Host *shost; 2771 int i; 2772 2773 if (vport->fc_flag & FC_OFFLINE_MODE) 2774 return; 2775 2776 lpfc_block_mgmt_io(phba, mbx_action); 2777 2778 lpfc_linkdown(phba); 2779 2780 /* Issue an unreg_login to all nodes on all vports */ 2781 vports = lpfc_create_vport_work_array(phba); 2782 if (vports != NULL) { 2783 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2784 if (vports[i]->load_flag & FC_UNLOADING) 2785 continue; 2786 shost = lpfc_shost_from_vport(vports[i]); 2787 spin_lock_irq(shost->host_lock); 2788 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 2789 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2790 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 2791 spin_unlock_irq(shost->host_lock); 2792 2793 shost = lpfc_shost_from_vport(vports[i]); 2794 list_for_each_entry_safe(ndlp, next_ndlp, 2795 &vports[i]->fc_nodes, 2796 nlp_listp) { 2797 if (!NLP_CHK_NODE_ACT(ndlp)) 2798 continue; 2799 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 2800 continue; 2801 if (ndlp->nlp_type & NLP_FABRIC) { 2802 lpfc_disc_state_machine(vports[i], ndlp, 2803 NULL, NLP_EVT_DEVICE_RECOVERY); 2804 lpfc_disc_state_machine(vports[i], ndlp, 2805 NULL, NLP_EVT_DEVICE_RM); 2806 } 2807 spin_lock_irq(shost->host_lock); 2808 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2809 spin_unlock_irq(shost->host_lock); 2810 /* 2811 * Whenever an SLI4 port goes offline, free the 2812 * RPI. Get a new RPI when the adapter port 2813 * comes back online. 2814 */ 2815 if (phba->sli_rev == LPFC_SLI_REV4) 2816 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 2817 lpfc_unreg_rpi(vports[i], ndlp); 2818 } 2819 } 2820 } 2821 lpfc_destroy_vport_work_array(phba, vports); 2822 2823 lpfc_sli_mbox_sys_shutdown(phba, mbx_action); 2824 } 2825 2826 /** 2827 * lpfc_offline - Bring a HBA offline 2828 * @phba: pointer to lpfc hba data structure. 2829 * 2830 * This routine actually brings a HBA offline. It stops all the timers 2831 * associated with the HBA, brings down the SLI layer, and eventually 2832 * marks the HBA as in offline state for the upper layer protocol. 2833 **/ 2834 void 2835 lpfc_offline(struct lpfc_hba *phba) 2836 { 2837 struct Scsi_Host *shost; 2838 struct lpfc_vport **vports; 2839 int i; 2840 2841 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2842 return; 2843 2844 /* stop port and all timers associated with this hba */ 2845 lpfc_stop_port(phba); 2846 vports = lpfc_create_vport_work_array(phba); 2847 if (vports != NULL) 2848 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 2849 lpfc_stop_vport_timers(vports[i]); 2850 lpfc_destroy_vport_work_array(phba, vports); 2851 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2852 "0460 Bring Adapter offline\n"); 2853 /* Bring down the SLI Layer and cleanup. 
The HBA is offline
2854 now. */
2855 lpfc_sli_hba_down(phba);
2856 spin_lock_irq(&phba->hbalock);
2857 phba->work_ha = 0;
2858 spin_unlock_irq(&phba->hbalock);
2859 vports = lpfc_create_vport_work_array(phba);
2860 if (vports != NULL)
2861 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2862 shost = lpfc_shost_from_vport(vports[i]);
2863 spin_lock_irq(shost->host_lock);
2864 vports[i]->work_port_events = 0;
2865 vports[i]->fc_flag |= FC_OFFLINE_MODE;
2866 spin_unlock_irq(shost->host_lock);
2867 }
2868 lpfc_destroy_vport_work_array(phba, vports);
2869 }
2870
2871 /**
2872 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2873 * @phba: pointer to lpfc hba data structure.
2874 *
2875 * This routine is to free all the SCSI buffers and IOCBs from the driver
2876 * list back to the kernel. It is called from lpfc_pci_remove_one to free
2877 * the internal resources before the device is removed from the system.
2878 **/
2879 static void
2880 lpfc_scsi_free(struct lpfc_hba *phba)
2881 {
2882 struct lpfc_scsi_buf *sb, *sb_next;
2883 struct lpfc_iocbq *io, *io_next;
2884
2885 spin_lock_irq(&phba->hbalock);
2886
2887 /* Release all the lpfc_scsi_bufs maintained by this host. */
2888
2889 spin_lock(&phba->scsi_buf_list_put_lock);
2890 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
2891 list) {
2892 list_del(&sb->list);
2893 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2894 sb->dma_handle);
2895 kfree(sb);
2896 phba->total_scsi_bufs--;
2897 }
2898 spin_unlock(&phba->scsi_buf_list_put_lock);
2899
2900 spin_lock(&phba->scsi_buf_list_get_lock);
2901 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
2902 list) {
2903 list_del(&sb->list);
2904 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2905 sb->dma_handle);
2906 kfree(sb);
2907 phba->total_scsi_bufs--;
2908 }
2909 spin_unlock(&phba->scsi_buf_list_get_lock);
2910
2911 /* Release all the lpfc_iocbq entries maintained by this host. */
2912 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2913 list_del(&io->list);
2914 kfree(io);
2915 phba->total_iocbq_bufs--;
2916 }
2917
2918 spin_unlock_irq(&phba->hbalock);
2919 }
2920
2921 /**
2922 * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping
2923 * @phba: pointer to lpfc hba data structure.
2924 *
2925 * This routine first calculates the sizes of the current els and allocated
2926 * scsi sgl lists, and then goes through all sgls to update the physical
2927 * XRIs assigned due to port function reset. During port initialization, the
2928 * current els and allocated scsi sgl lists are empty.
2929 *
2930 * Return codes
2931 * 0 - successful (for now, it always returns 0)
2932 **/
2933 int
2934 lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
2935 {
2936 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
2937 struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL;
2938 uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt;
2939 LIST_HEAD(els_sgl_list);
2940 LIST_HEAD(scsi_sgl_list);
2941 int rc;
2942
2943 /*
2944 * update on pci function's els xri-sgl list
2945 */
2946 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
2947 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
2948 /* els xri-sgl expanded */
2949 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
2950 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2951 "3157 ELS xri-sgl count increased from "
2952 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
2953 els_xri_cnt);
2954 /* allocate the additional els sgls */
2955 for (i = 0; i < xri_cnt; i++) {
2956 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
2957 GFP_KERNEL);
2958 if (sglq_entry == NULL) {
2959 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2960 "2562 Failure to allocate an "
2961 "ELS sgl entry:%d\n", i);
2962 rc = -ENOMEM;
2963 goto out_free_mem;
2964 }
2965 sglq_entry->buff_type = GEN_BUFF_TYPE;
2966 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
2967 &sglq_entry->phys);
2968 if (sglq_entry->virt == NULL) {
2969 kfree(sglq_entry);
2970 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2971 "2563 Failure to allocate an "
2972 "ELS mbuf:%d\n", i);
2973 rc = -ENOMEM;
2974 goto out_free_mem;
2975 }
2976 sglq_entry->sgl = sglq_entry->virt;
2977 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
2978 sglq_entry->state = SGL_FREED;
2979 list_add_tail(&sglq_entry->list, &els_sgl_list);
2980 }
2981 spin_lock_irq(&phba->hbalock);
2982 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
2983 spin_unlock_irq(&phba->hbalock);
2984 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
2985 /* els xri-sgl shrunk */
2986 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
2987 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2988 "3158 ELS xri-sgl count decreased from "
2989 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
2990 els_xri_cnt);
2991 spin_lock_irq(&phba->hbalock);
2992 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list);
2993 spin_unlock_irq(&phba->hbalock);
2994 /* release extra els sgls from list */
2995 for (i = 0; i < xri_cnt; i++) {
2996 list_remove_head(&els_sgl_list,
2997 sglq_entry, struct lpfc_sglq, list);
2998 if (sglq_entry) {
2999 lpfc_mbuf_free(phba, sglq_entry->virt,
3000 sglq_entry->phys);
3001 kfree(sglq_entry);
3002 }
3003 }
3004 spin_lock_irq(&phba->hbalock);
3005 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
3006 spin_unlock_irq(&phba->hbalock);
3007 } else
3008 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3009 "3163 ELS xri-sgl count unchanged: %d\n",
3010 els_xri_cnt);
3011 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3012
3013 /* update xris to els sgls on the list */
3014 sglq_entry = NULL;
3015 sglq_entry_next = NULL;
3016 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3017 &phba->sli4_hba.lpfc_sgl_list, list) {
3018 lxri = lpfc_sli4_next_xritag(phba);
3019 if (lxri == NO_XRI) {
3020 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3021 "2400 Failed to allocate xri for "
3022 "ELS sgl\n");
3023 rc = -ENOMEM;
3024 goto out_free_mem;
3025 }
3026 sglq_entry->sli4_lxritag = lxri;
3027 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3028 }
3029
3030 /*
3031 * update on pci function's allocated scsi xri-sgl list
3032 */
3033 phba->total_scsi_bufs = 0;
3034
3035 /* maximum number of xris available for scsi buffers */
3036 phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
3037 els_xri_cnt;
3038
3039 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3040 "2401 Current allocated SCSI xri-sgl count:%d, "
3041 "maximum SCSI xri count:%d\n",
3042 phba->sli4_hba.scsi_xri_cnt,
3043 phba->sli4_hba.scsi_xri_max);
3044
3045 spin_lock_irq(&phba->scsi_buf_list_get_lock);
3046 spin_lock(&phba->scsi_buf_list_put_lock);
3047 list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
3048 list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
3049 spin_unlock(&phba->scsi_buf_list_put_lock);
3050 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3051
3052 if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
3053 /* max scsi xri shrunk below the allocated scsi buffers */
3054 scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
3055 phba->sli4_hba.scsi_xri_max;
3056 /* release the extra allocated scsi buffers */
3057 for (i = 0; i < scsi_xri_cnt; i++) {
3058 list_remove_head(&scsi_sgl_list, psb,
3059 struct lpfc_scsi_buf, list);
3060 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, psb->data,
3061 psb->dma_handle);
3062 kfree(psb);
3063 }
3064 spin_lock_irq(&phba->scsi_buf_list_get_lock);
3065 phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
3066 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3067 }
3068
3069 /* update xris associated to remaining allocated scsi buffers */
3070 psb = NULL;
3071 psb_next = NULL;
3072 list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
3073 lxri = lpfc_sli4_next_xritag(phba);
3074 if (lxri == NO_XRI) {
3075 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3076 "2560 Failed to allocate xri for "
3077 "scsi buffer\n");
3078 rc = -ENOMEM;
3079 goto out_free_mem;
3080 }
3081 psb->cur_iocbq.sli4_lxritag = lxri;
3082 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3083 }
3084 spin_lock_irq(&phba->scsi_buf_list_get_lock);
3085 spin_lock(&phba->scsi_buf_list_put_lock);
3086 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
3087 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
3088 spin_unlock(&phba->scsi_buf_list_put_lock);
3089 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3090
3091 return 0;
3092
3093 out_free_mem:
3094 lpfc_free_els_sgl_list(phba);
3095 lpfc_scsi_free(phba);
3096 return rc;
3097 }
3098
3099 /**
3100 * lpfc_create_port - Create an FC port
3101 * @phba: pointer to lpfc hba data structure.
3102 * @instance: a unique integer ID to this FC port.
3103 * @dev: pointer to the device data structure.
3104 *
3105 * This routine creates a FC port for the upper layer protocol. The FC port
3106 * can be created on top of either a physical port or a virtual port provided
3107 * by the HBA. This routine also allocates a SCSI host data structure (shost)
3108 * and associates it with the FC port before adding the shost into the
3109 * SCSI layer.
3110 *
3111 * Return codes
3112 * @vport - pointer to the virtual N_Port data structure.
3113 * NULL - port create failed.
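 *
 * For example (hypothetical numbers): with a cfg_hba_queue_depth of
 * 8192, the initial shost->can_queue set below is 8182; the value is
 * revisited once the real XRI limits are known from HBA setup.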
3114 **/ 3115 struct lpfc_vport * 3116 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 3117 { 3118 struct lpfc_vport *vport; 3119 struct Scsi_Host *shost; 3120 int error = 0; 3121 3122 if (dev != &phba->pcidev->dev) 3123 shost = scsi_host_alloc(&lpfc_vport_template, 3124 sizeof(struct lpfc_vport)); 3125 else 3126 shost = scsi_host_alloc(&lpfc_template, 3127 sizeof(struct lpfc_vport)); 3128 if (!shost) 3129 goto out; 3130 3131 vport = (struct lpfc_vport *) shost->hostdata; 3132 vport->phba = phba; 3133 vport->load_flag |= FC_LOADING; 3134 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3135 vport->fc_rscn_flush = 0; 3136 3137 lpfc_get_vport_cfgparam(vport); 3138 shost->unique_id = instance; 3139 shost->max_id = LPFC_MAX_TARGET; 3140 shost->max_lun = vport->cfg_max_luns; 3141 shost->this_id = -1; 3142 shost->max_cmd_len = 16; 3143 if (phba->sli_rev == LPFC_SLI_REV4) { 3144 shost->dma_boundary = 3145 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 3146 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 3147 } 3148 3149 /* 3150 * Set initial can_queue value since 0 is no longer supported and 3151 * scsi_add_host will fail. This will be adjusted later based on the 3152 * max xri value determined in hba setup. 3153 */ 3154 shost->can_queue = phba->cfg_hba_queue_depth - 10; 3155 if (dev != &phba->pcidev->dev) { 3156 shost->transportt = lpfc_vport_transport_template; 3157 vport->port_type = LPFC_NPIV_PORT; 3158 } else { 3159 shost->transportt = lpfc_transport_template; 3160 vport->port_type = LPFC_PHYSICAL_PORT; 3161 } 3162 3163 /* Initialize all internally managed lists. */ 3164 INIT_LIST_HEAD(&vport->fc_nodes); 3165 INIT_LIST_HEAD(&vport->rcv_buffer_list); 3166 spin_lock_init(&vport->work_port_lock); 3167 3168 init_timer(&vport->fc_disctmo); 3169 vport->fc_disctmo.function = lpfc_disc_timeout; 3170 vport->fc_disctmo.data = (unsigned long)vport; 3171 3172 init_timer(&vport->fc_fdmitmo); 3173 vport->fc_fdmitmo.function = lpfc_fdmi_tmo; 3174 vport->fc_fdmitmo.data = (unsigned long)vport; 3175 3176 init_timer(&vport->els_tmofunc); 3177 vport->els_tmofunc.function = lpfc_els_timeout; 3178 vport->els_tmofunc.data = (unsigned long)vport; 3179 3180 init_timer(&vport->delayed_disc_tmo); 3181 vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo; 3182 vport->delayed_disc_tmo.data = (unsigned long)vport; 3183 3184 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 3185 if (error) 3186 goto out_put_shost; 3187 3188 spin_lock_irq(&phba->hbalock); 3189 list_add_tail(&vport->listentry, &phba->port_list); 3190 spin_unlock_irq(&phba->hbalock); 3191 return vport; 3192 3193 out_put_shost: 3194 scsi_host_put(shost); 3195 out: 3196 return NULL; 3197 } 3198 3199 /** 3200 * destroy_port - destroy an FC port 3201 * @vport: pointer to an lpfc virtual N_Port data structure. 3202 * 3203 * This routine destroys a FC port from the upper layer protocol. All the 3204 * resources associated with the port are released. 3205 **/ 3206 void 3207 destroy_port(struct lpfc_vport *vport) 3208 { 3209 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3210 struct lpfc_hba *phba = vport->phba; 3211 3212 lpfc_debugfs_terminate(vport); 3213 fc_remove_host(shost); 3214 scsi_remove_host(shost); 3215 3216 spin_lock_irq(&phba->hbalock); 3217 list_del_init(&vport->listentry); 3218 spin_unlock_irq(&phba->hbalock); 3219 3220 lpfc_cleanup(vport); 3221 return; 3222 } 3223 3224 /** 3225 * lpfc_get_instance - Get a unique integer ID 3226 * 3227 * This routine allocates a unique integer ID from lpfc_hba_index pool. 
It 3228 * uses the kernel idr facility to perform the task. 3229 * 3230 * Return codes: 3231 * instance - a unique integer ID allocated as the new instance. 3232 * -1 - lpfc get instance failed. 3233 **/ 3234 int 3235 lpfc_get_instance(void) 3236 { 3237 int ret; 3238 3239 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); 3240 return ret < 0 ? -1 : ret; 3241 } 3242 3243 /** 3244 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 3245 * @shost: pointer to SCSI host data structure. 3246 * @time: elapsed time of the scan in jiffies. 3247 * 3248 * This routine is called by the SCSI layer with a SCSI host to determine 3249 * whether the scan host is finished. 3250 * 3251 * Note: there is no scan_start function as adapter initialization will have 3252 * asynchronously kicked off the link initialization. 3253 * 3254 * Return codes 3255 * 0 - SCSI host scan is not over yet. 3256 * 1 - SCSI host scan is over. 3257 **/ 3258 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 3259 { 3260 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3261 struct lpfc_hba *phba = vport->phba; 3262 int stat = 0; 3263 3264 spin_lock_irq(shost->host_lock); 3265 3266 if (vport->load_flag & FC_UNLOADING) { 3267 stat = 1; 3268 goto finished; 3269 } 3270 if (time >= msecs_to_jiffies(30 * 1000)) { 3271 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3272 "0461 Scanning longer than 30 " 3273 "seconds. Continuing initialization\n"); 3274 stat = 1; 3275 goto finished; 3276 } 3277 if (time >= msecs_to_jiffies(15 * 1000) && 3278 phba->link_state <= LPFC_LINK_DOWN) { 3279 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3280 "0465 Link down longer than 15 " 3281 "seconds. Continuing initialization\n"); 3282 stat = 1; 3283 goto finished; 3284 } 3285 3286 if (vport->port_state != LPFC_VPORT_READY) 3287 goto finished; 3288 if (vport->num_disc_nodes || vport->fc_prli_sent) 3289 goto finished; 3290 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) 3291 goto finished; 3292 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 3293 goto finished; 3294 3295 stat = 1; 3296 3297 finished: 3298 spin_unlock_irq(shost->host_lock); 3299 return stat; 3300 } 3301 3302 /** 3303 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 3304 * @shost: pointer to SCSI host data structure. 3305 * 3306 * This routine initializes a given SCSI host attributes on a FC port. The 3307 * SCSI host can be either on top of a physical port or a virtual port. 3308 **/ 3309 void lpfc_host_attrib_init(struct Scsi_Host *shost) 3310 { 3311 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3312 struct lpfc_hba *phba = vport->phba; 3313 /* 3314 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 
3315 */ 3316 3317 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 3318 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 3319 fc_host_supported_classes(shost) = FC_COS_CLASS3; 3320 3321 memset(fc_host_supported_fc4s(shost), 0, 3322 sizeof(fc_host_supported_fc4s(shost))); 3323 fc_host_supported_fc4s(shost)[2] = 1; 3324 fc_host_supported_fc4s(shost)[7] = 1; 3325 3326 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 3327 sizeof fc_host_symbolic_name(shost)); 3328 3329 fc_host_supported_speeds(shost) = 0; 3330 if (phba->lmt & LMT_16Gb) 3331 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; 3332 if (phba->lmt & LMT_10Gb) 3333 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 3334 if (phba->lmt & LMT_8Gb) 3335 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 3336 if (phba->lmt & LMT_4Gb) 3337 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 3338 if (phba->lmt & LMT_2Gb) 3339 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 3340 if (phba->lmt & LMT_1Gb) 3341 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 3342 3343 fc_host_maxframe_size(shost) = 3344 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 3345 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 3346 3347 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 3348 3349 /* This value is also unchanging */ 3350 memset(fc_host_active_fc4s(shost), 0, 3351 sizeof(fc_host_active_fc4s(shost))); 3352 fc_host_active_fc4s(shost)[2] = 1; 3353 fc_host_active_fc4s(shost)[7] = 1; 3354 3355 fc_host_max_npiv_vports(shost) = phba->max_vpi; 3356 spin_lock_irq(shost->host_lock); 3357 vport->load_flag &= ~FC_LOADING; 3358 spin_unlock_irq(shost->host_lock); 3359 } 3360 3361 /** 3362 * lpfc_stop_port_s3 - Stop SLI3 device port 3363 * @phba: pointer to lpfc hba data structure. 3364 * 3365 * This routine is invoked to stop an SLI3 device port, it stops the device 3366 * from generating interrupts and stops the device driver's timers for the 3367 * device. 3368 **/ 3369 static void 3370 lpfc_stop_port_s3(struct lpfc_hba *phba) 3371 { 3372 /* Clear all interrupt enable conditions */ 3373 writel(0, phba->HCregaddr); 3374 readl(phba->HCregaddr); /* flush */ 3375 /* Clear all pending interrupts */ 3376 writel(0xffffffff, phba->HAregaddr); 3377 readl(phba->HAregaddr); /* flush */ 3378 3379 /* Reset some HBA SLI setup states */ 3380 lpfc_stop_hba_timers(phba); 3381 phba->pport->work_port_events = 0; 3382 } 3383 3384 /** 3385 * lpfc_stop_port_s4 - Stop SLI4 device port 3386 * @phba: pointer to lpfc hba data structure. 3387 * 3388 * This routine is invoked to stop an SLI4 device port, it stops the device 3389 * from generating interrupts and stops the device driver's timers for the 3390 * device. 3391 **/ 3392 static void 3393 lpfc_stop_port_s4(struct lpfc_hba *phba) 3394 { 3395 /* Reset some HBA SLI4 setup states */ 3396 lpfc_stop_hba_timers(phba); 3397 phba->pport->work_port_events = 0; 3398 phba->sli4_hba.intr_enable = 0; 3399 } 3400 3401 /** 3402 * lpfc_stop_port - Wrapper function for stopping hba port 3403 * @phba: Pointer to HBA context object. 3404 * 3405 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 3406 * the API jump table function pointer from the lpfc_hba struct. 3407 **/ 3408 void 3409 lpfc_stop_port(struct lpfc_hba *phba) 3410 { 3411 phba->lpfc_stop_port(phba); 3412 } 3413 3414 /** 3415 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 3416 * @phba: Pointer to hba for which this call is being executed. 
3417 *
3418 * This routine starts the timer waiting for the FCF rediscovery to complete.
3419 **/
3420 void
3421 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
3422 {
3423 unsigned long fcf_redisc_wait_tmo =
3424 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
3425 /* Start fcf rediscovery wait period timer */
3426 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
3427 spin_lock_irq(&phba->hbalock);
3428 /* Allow action to new fcf asynchronous event */
3429 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
3430 /* Mark the FCF rediscovery pending state */
3431 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
3432 spin_unlock_irq(&phba->hbalock);
3433 }
3434
3435 /**
3436 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
3437 * @ptr: unsigned long holding the lpfc_hba data structure pointer.
3438 *
3439 * This routine is invoked when the wait for FCF table rediscover has
3440 * timed out. If new FCF record(s) have been discovered during the
3441 * wait period, a new FCF event shall be added to the FCOE async event
3442 * list, and the worker thread shall be woken up for processing from the
3443 * worker thread context.
3444 **/
3445 void
3446 lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
3447 {
3448 struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
3449
3450 /* Don't send FCF rediscovery event if timer cancelled */
3451 spin_lock_irq(&phba->hbalock);
3452 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3453 spin_unlock_irq(&phba->hbalock);
3454 return;
3455 }
3456 /* Clear FCF rediscovery timer pending flag */
3457 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3458 /* FCF rediscovery event to worker thread */
3459 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
3460 spin_unlock_irq(&phba->hbalock);
3461 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
3462 "2776 FCF rediscover quiescent timer expired\n");
3463 /* wake up worker thread */
3464 lpfc_worker_wake_up(phba);
3465 }
3466
3467 /**
3468 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
3469 * @phba: pointer to lpfc hba data structure.
3470 * @acqe_link: pointer to the async link completion queue entry.
3471 *
3472 * This routine is to parse the SLI4 link-attention link fault code and
3473 * translate it into the base driver's read link attention mailbox command
3474 * status.
3475 *
3476 * Return: Link-attention status in terms of base driver's coding.
3477 **/
3478 static uint16_t
3479 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
3480 struct lpfc_acqe_link *acqe_link)
3481 {
3482 uint16_t latt_fault;
3483
3484 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
3485 case LPFC_ASYNC_LINK_FAULT_NONE:
3486 case LPFC_ASYNC_LINK_FAULT_LOCAL:
3487 case LPFC_ASYNC_LINK_FAULT_REMOTE:
3488 latt_fault = 0;
3489 break;
3490 default:
3491 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3492 "0398 Invalid link fault code: x%x\n",
3493 bf_get(lpfc_acqe_link_fault, acqe_link));
3494 latt_fault = MBXERR_ERROR;
3495 break;
3496 }
3497 return latt_fault;
3498 }
3499
3500 /**
3501 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
3502 * @phba: pointer to lpfc hba data structure.
3503 * @acqe_link: pointer to the async link completion queue entry.
3504 *
3505 * This routine is to parse the SLI4 link attention type and translate it
3506 * into the base driver's link attention type coding.
3507 *
3508 * Return: Link attention type in terms of base driver's coding.
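 *
 * For example, LPFC_ASYNC_LINK_STATUS_LOGICAL_UP parses to
 * LPFC_ATT_LINK_UP, while a bare physical link-up deliberately parses
 * to LPFC_ATT_RESERVED so that the caller keeps waiting for the logical
 * link-up event.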
3509 **/
3510 static uint8_t
3511 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3512 struct lpfc_acqe_link *acqe_link)
3513 {
3514 uint8_t att_type;
3515
3516 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3517 case LPFC_ASYNC_LINK_STATUS_DOWN:
3518 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
3519 att_type = LPFC_ATT_LINK_DOWN;
3520 break;
3521 case LPFC_ASYNC_LINK_STATUS_UP:
3522 /* Ignore physical link up events - wait for logical link up */
3523 att_type = LPFC_ATT_RESERVED;
3524 break;
3525 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
3526 att_type = LPFC_ATT_LINK_UP;
3527 break;
3528 default:
3529 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3530 "0399 Invalid link attention type: x%x\n",
3531 bf_get(lpfc_acqe_link_status, acqe_link));
3532 att_type = LPFC_ATT_RESERVED;
3533 break;
3534 }
3535 return att_type;
3536 }
3537
3538 /**
3539 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
3540 * @phba: pointer to lpfc hba data structure.
3541 * @acqe_link: pointer to the async link completion queue entry.
3542 *
3543 * This routine is to parse the SLI4 link-attention link speed and translate
3544 * it into the base driver's link-attention link speed coding.
3545 *
3546 * Return: Link-attention link speed in terms of base driver's coding.
3547 **/
3548 static uint8_t
3549 lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3550 struct lpfc_acqe_link *acqe_link)
3551 {
3552 uint8_t link_speed;
3553
3554 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
3555 case LPFC_ASYNC_LINK_SPEED_ZERO:
3556 case LPFC_ASYNC_LINK_SPEED_10MBPS:
3557 case LPFC_ASYNC_LINK_SPEED_100MBPS:
3558 link_speed = LPFC_LINK_SPEED_UNKNOWN;
3559 break;
3560 case LPFC_ASYNC_LINK_SPEED_1GBPS:
3561 link_speed = LPFC_LINK_SPEED_1GHZ;
3562 break;
3563 case LPFC_ASYNC_LINK_SPEED_10GBPS:
3564 link_speed = LPFC_LINK_SPEED_10GHZ;
3565 break;
3566 default:
3567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3568 "0483 Invalid link-attention link speed: x%x\n",
3569 bf_get(lpfc_acqe_link_speed, acqe_link));
3570 link_speed = LPFC_LINK_SPEED_UNKNOWN;
3571 break;
3572 }
3573 return link_speed;
3574 }
3575
3576 /**
3577 * lpfc_sli_port_speed_get - Get sli3 link speed in Mbps
3578 * @phba: pointer to lpfc hba data structure.
3579 *
3580 * This routine is to get an SLI3 FC port's link speed in Mbps.
3581 *
3582 * Return: link speed in terms of Mbps.
3583 **/
3584 uint32_t
3585 lpfc_sli_port_speed_get(struct lpfc_hba *phba)
3586 {
3587 uint32_t link_speed;
3588
3589 if (!lpfc_is_link_up(phba))
3590 return 0;
3591
3592 switch (phba->fc_linkspeed) {
3593 case LPFC_LINK_SPEED_1GHZ:
3594 link_speed = 1000;
3595 break;
3596 case LPFC_LINK_SPEED_2GHZ:
3597 link_speed = 2000;
3598 break;
3599 case LPFC_LINK_SPEED_4GHZ:
3600 link_speed = 4000;
3601 break;
3602 case LPFC_LINK_SPEED_8GHZ:
3603 link_speed = 8000;
3604 break;
3605 case LPFC_LINK_SPEED_10GHZ:
3606 link_speed = 10000;
3607 break;
3608 case LPFC_LINK_SPEED_16GHZ:
3609 link_speed = 16000;
3610 break;
3611 default:
3612 link_speed = 0;
3613 }
3614 return link_speed;
3615 }
3616
3617 /**
3618 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
3619 * @phba: pointer to lpfc hba data structure.
3620 * @evt_code: asynchronous event code.
3621 * @speed_code: asynchronous event link speed code.
3622 *
3623 * This routine is to parse the given SLI4 async event link speed code into
3624 * its link speed value in Mbps.
3625 *
3626 * Return: link speed in terms of Mbps.
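 *
 * For example, an FC trailer code with speed code
 * LPFC_EVT_CODE_FC_8_GBAUD parses to 8000 Mbps, and any unrecognized
 * speed code conservatively parses to 0.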
 **/
static uint32_t
lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
			   uint8_t speed_code)
{
	uint32_t port_speed;

	switch (evt_code) {
	case LPFC_TRAILER_CODE_LINK:
		switch (speed_code) {
		case LPFC_EVT_CODE_LINK_NO_LINK:
			port_speed = 0;
			break;
		case LPFC_EVT_CODE_LINK_10_MBIT:
			port_speed = 10;
			break;
		case LPFC_EVT_CODE_LINK_100_MBIT:
			port_speed = 100;
			break;
		case LPFC_EVT_CODE_LINK_1_GBIT:
			port_speed = 1000;
			break;
		case LPFC_EVT_CODE_LINK_10_GBIT:
			port_speed = 10000;
			break;
		default:
			port_speed = 0;
		}
		break;
	case LPFC_TRAILER_CODE_FC:
		switch (speed_code) {
		case LPFC_EVT_CODE_FC_NO_LINK:
			port_speed = 0;
			break;
		case LPFC_EVT_CODE_FC_1_GBAUD:
			port_speed = 1000;
			break;
		case LPFC_EVT_CODE_FC_2_GBAUD:
			port_speed = 2000;
			break;
		case LPFC_EVT_CODE_FC_4_GBAUD:
			port_speed = 4000;
			break;
		case LPFC_EVT_CODE_FC_8_GBAUD:
			port_speed = 8000;
			break;
		case LPFC_EVT_CODE_FC_10_GBAUD:
			port_speed = 10000;
			break;
		case LPFC_EVT_CODE_FC_16_GBAUD:
			port_speed = 16000;
			break;
		default:
			port_speed = 0;
		}
		break;
	default:
		port_speed = 0;
	}
	return port_speed;
}

/**
 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE link event.
 **/
static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_link *acqe_link)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	uint8_t att_type;
	int rc;

	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
		return;
	phba->fcoe_eventtag = acqe_link->event_tag;
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0395 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0396 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0397 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we are done processing the link event */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
				bf_get(lpfc_acqe_link_speed, acqe_link));
	phba->sli4_hba.link_state.duplex =
				bf_get(lpfc_acqe_link_duplex, acqe_link);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_link_status, acqe_link);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_link_type, acqe_link);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_link_number, acqe_link);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_link);
	phba->sli4_hba.link_state.logical_speed =
			bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2900 Async FC/FCoE Link event - Speed:%dGBit "
			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
			"Logical speed:%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.duplex,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	/*
	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
	 * topology info. Note: Optional for non FC-AL ports.
	 */
	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			goto out_free_dmabuf;
		return;
	}
	/*
	 * For FCoE Mode: fill in all the topology information we need and call
	 * the READ_TOPOLOGY completion routine to continue without actually
	 * sending the READ_TOPOLOGY mailbox command to the port.
	 */
	/* Parse and translate status field */
	mb = &pmb->u.mb;
	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);

	/* Parse and translate link attention fields */
	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
	la->eventTag = acqe_link->event_tag;
	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
	bf_set(lpfc_mbx_read_top_link_spd, la,
	       lpfc_sli4_parse_latt_link_speed(phba, acqe_link));

	/* Fake the following irrelevant fields */
	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
	bf_set(lpfc_mbx_read_top_il, la, 0);
	bf_set(lpfc_mbx_read_top_pb, la, 0);
	bf_set(lpfc_mbx_read_top_fa, la, 0);
	bf_set(lpfc_mbx_read_top_mm, la, 0);

	/* Invoke the lpfc_handle_latt mailbox command callback function */
	lpfc_mbx_cmpl_read_topology(phba, pmb);

	return;

out_free_dmabuf:
	/* Free the DMA buffer backing memory if it was allocated */
	if (mp->virt)
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async fc completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
 * that the event was received and then issue a read_topology mailbox command so
 * that the rest of the driver will treat it the same as SLI3.
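 *
 * Any trailer type other than an FC link attention event is logged and
 * ignored.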
 **/
static void
lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	int rc;

	if (bf_get(lpfc_trailer_type, acqe_fc) !=
	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2895 Non-FC link event detected (%d)\n",
				bf_get(lpfc_trailer_type, acqe_fc));
		return;
	}
	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
	phba->sli4_hba.link_state.topology =
				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_fc);
	phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
			"%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2897 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2898 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2899 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we are done processing the link event */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		goto out_free_dmabuf;
	return;

out_free_dmabuf:
	/* Free the DMA buffer backing memory if it was allocated */
	if (mp->virt)
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_sli: pointer to the async SLI completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous SLI events.
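 *
 * Only the misconfigured port event on if_type 2 ports is given special
 * handling here; all other SLI events are simply logged.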
 **/
static void
lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
{
	char port_name;
	char message[128];
	uint8_t status;
	struct lpfc_acqe_misconfigured_event *misconfigured;

	/* special case misconfigured event as it contains data for all ports */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	     LPFC_SLI_INTF_IF_TYPE_2) ||
	    (bf_get(lpfc_trailer_type, acqe_sli) !=
	     LPFC_SLI_EVENT_TYPE_MISCONFIGURED)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2901 Async SLI event - Event Data1:x%08x Event Data2:"
				"x%08x SLI Event Type:%d\n",
				acqe_sli->event_data1, acqe_sli->event_data2,
				bf_get(lpfc_trailer_type, acqe_sli));
		return;
	}

	port_name = phba->Port[0];
	if (port_name == 0x00)
		port_name = '?'; /* use '?' when the port name is empty */

	misconfigured = (struct lpfc_acqe_misconfigured_event *)
					&acqe_sli->event_data1;

	/* fetch the status for this port */
	switch (phba->sli4_hba.lnk_info.lnk_no) {
	case LPFC_LINK_NUMBER_0:
		status = bf_get(lpfc_sli_misconfigured_port0,
					&misconfigured->theEvent);
		break;
	case LPFC_LINK_NUMBER_1:
		status = bf_get(lpfc_sli_misconfigured_port1,
					&misconfigured->theEvent);
		break;
	case LPFC_LINK_NUMBER_2:
		status = bf_get(lpfc_sli_misconfigured_port2,
					&misconfigured->theEvent);
		break;
	case LPFC_LINK_NUMBER_3:
		status = bf_get(lpfc_sli_misconfigured_port3,
					&misconfigured->theEvent);
		break;
	default:
		status = ~LPFC_SLI_EVENT_STATUS_VALID;
		break;
	}

	switch (status) {
	case LPFC_SLI_EVENT_STATUS_VALID:
		return; /* no message if the sfp is okay */
	case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
		sprintf(message, "Optics faulted/incorrectly installed/not "
			"installed - Reseat optics, if issue not "
			"resolved, replace.");
		break;
	case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
		sprintf(message,
			"Optics of two types installed - Remove one optic or "
			"install matching pair of optics.");
		break;
	case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
		sprintf(message, "Incompatible optics - Replace with "
			"compatible optics for card to function.");
		break;
	default:
		/* firmware is reporting a status we don't know about */
		sprintf(message, "Unknown event status x%02x", status);
		break;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"3176 Misconfigured Physical Port - "
			"Port Name %c %s\n", port_name, message);
}

/**
 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
 * @vport: pointer to vport data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on a vport in
 * response to a CVL event.
 *
 * Return the pointer to the ndlp with the vport if successful, otherwise
 * return NULL.
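 *
 * Note: the host lock is taken while the FC_VPORT_CVL_RCVD flag is set
 * on the vport.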
 **/
static struct lpfc_nodelist *
lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;

	if (!vport)
		return NULL;
	phba = vport->phba;
	if (!phba)
		return NULL;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, Fabric_DID);
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return NULL;
	}
	if ((phba->pport->port_state < LPFC_FLOGI) &&
	    (phba->pport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	/* If virtual link is not yet instantiated ignore CVL */
	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
	    && (vport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	shost = lpfc_shost_from_vport(vport);
	if (!shost)
		return NULL;
	lpfc_linkdown_port(vport);
	lpfc_cleanup_pending_mbox(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_VPORT_CVL_RCVD;
	spin_unlock_irq(shost->host_lock);

	return ndlp;
}

/**
 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on all vports in
 * response to an FCF dead event.
 **/
static void
lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_sli4_perform_vport_cvl(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async fcoe completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE FIP event.
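 *
 * The FIP event types handled are NEW_FCF, FCF_PARAM_MOD, FCF_TABLE_FULL,
 * FCF_DEAD and CVL; any other event type is logged as unknown.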
 **/
static void
lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
			struct lpfc_acqe_fip *acqe_fip)
{
	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
	int rc;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int active_vlink_present;
	struct lpfc_vport **vports;
	int i;

	phba->fc_eventTag = acqe_fip->event_tag;
	phba->fcoe_eventtag = acqe_fip->event_tag;
	switch (event_type) {
	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2546 New FCF event, evt_tag:x%x, "
					"index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		else
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
					LOG_DISCOVERY,
					"2788 FCF param modified event, "
					"evt_tag:x%x, index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			/*
			 * During period of FCF discovery, read the FCF
			 * table record indexed by the event to update
			 * FCF roundrobin failover eligible FCF bmask.
			 */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2779 Read FCF (x%x) for updating "
					"roundrobin FCF failover bmask\n",
					acqe_fip->index);
			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
		}

		/* If the FCF discovery is in progress, do nothing. */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		/* If fast FCF failover rescan event is pending, do nothing */
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}

		/* If the FCF has already been discovered, do nothing. */
		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* Otherwise, scan the entire FCF table and re-discover SAN */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2770 Start FCF table scan per async FCF "
				"event, evt_tag:x%x, index:x%x\n",
				acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
					"2547 Issue FCF scan read FCF mailbox "
					"command failed (x%x)\n", rc);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2548 FCF Table full count 0x%x tag 0x%x\n",
				bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
				acqe_fip->event_tag);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2549 FCF (x%x) disconnected from network, "
				"tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
		/*
		 * If we are in the middle of FCF failover process, clear
		 * the corresponding FCF bit in the roundrobin bitmap.
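		 * Clearing the bit keeps the dead FCF from being selected
		 * again by the roundrobin failover logic.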
		 */
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			spin_unlock_irq(&phba->hbalock);
			/* Update FLOGI FCF failover eligible FCF bmask */
			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* If the event is not for currently used fcf do nothing */
		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
			break;

		/*
		 * Otherwise, request the port to rediscover the entire FCF
		 * table for a fast recovery from case that the current FCF
		 * is no longer valid as we are not in the middle of FCF
		 * failover process already.
		 */
		spin_lock_irq(&phba->hbalock);
		/* Mark the fast failover process in progress */
		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
		spin_unlock_irq(&phba->hbalock);

		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2771 Start FCF fast failover process due to "
				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x\n",
				acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_redisc_fcf_table(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2772 Issue FCF rediscover mailbox "
					"command failed, fail through to FCF "
					"dead event\n");
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Last resort will fail over by treating this
			 * as a link down to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		} else {
			/* Reset FCF roundrobin bmask for new discovery */
			lpfc_sli4_clear_fcf_rr_bmask(phba);
			/*
			 * Handling fast FCF failover to a DEAD FCF event is
			 * considered equivalent to receiving CVL to all vports.
			 */
			lpfc_sli4_perform_all_vport_cvl(phba);
		}
		break;
	case LPFC_FIP_EVENT_TYPE_CVL:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2718 Clear Virtual Link Received for VPI 0x%x"
				" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);

		vport = lpfc_find_vport_by_vpid(phba,
						acqe_fip->index);
		ndlp = lpfc_sli4_perform_vport_cvl(vport);
		if (!ndlp)
			break;
		active_vlink_present = 0;

		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_CVL_RCVD)) &&
					(vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		if (active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc,
				  jiffies + msecs_to_jiffies(1000));
			shost = lpfc_shost_from_vport(vport);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(shost->host_lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			/*
			 * Otherwise, request the port to rediscover
			 * the entire FCF table for a fast recovery
			 * from the possible case that the current FCF
			 * is no longer valid, if we are not already
			 * in the FCF failover process.
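			 * This path shares the fast failover machinery of
			 * the FCF DEAD case above, using FCF_ACVL_DISC in
			 * place of FCF_DEAD_DISC to mark the discovery.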
			 */
			spin_lock_irq(&phba->hbalock);
			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
				spin_unlock_irq(&phba->hbalock);
				break;
			}
			/* Mark the fast failover process in progress */
			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2773 Start FCF failover per CVL, "
					"evt_tag:x%x\n", acqe_fip->event_tag);
			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
						LOG_DISCOVERY,
						"2774 Issue FCF rediscover "
						"mailbox command failed, fail "
						"through to CVL event\n");
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
				spin_unlock_irq(&phba->hbalock);
				/*
				 * Last resort will be to retry on the
				 * currently registered FCF entry.
				 */
				lpfc_retry_pport_discovery(phba);
			} else
				/*
				 * Reset FCF roundrobin bmask for new
				 * discovery.
				 */
				lpfc_sli4_clear_fcf_rr_bmask(phba);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0288 Unknown FCoE event type 0x%x event tag "
				"0x%x\n", event_type, acqe_fip->event_tag);
		break;
	}
}

/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous dcbx event.
 **/
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_dcbx *acqe_dcbx)
{
	phba->fc_eventTag = acqe_dcbx->event_tag;
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
}

/**
 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_grp5: pointer to the async grp5 completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
 * is an asynchronous notification of a logical link speed change. The Port
 * reports the logical link speed in units of 10Mbps.
 **/
static void
lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_grp5 *acqe_grp5)
{
	uint16_t prev_ll_spd;

	phba->fc_eventTag = acqe_grp5->event_tag;
	phba->fcoe_eventtag = acqe_grp5->event_tag;
	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
	phba->sli4_hba.link_state.logical_speed =
		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2789 GRP5 Async Event: Updating logical link speed "
			"from %dMbps to %dMbps\n", prev_ll_spd,
			phba->sli4_hba.link_state.logical_speed);
}

/**
 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 asynchronous events.
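 *
 * Events are dequeued from the slow-path async work queue under hbalock,
 * dispatched on their trailer code, and returned to the CQ event free pool
 * once processed.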
 **/
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the async event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ASYNC_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the async events */
	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Process the asynchronous event */
		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
		case LPFC_TRAILER_CODE_LINK:
			lpfc_sli4_async_link_evt(phba,
						 &cq_event->cqe.acqe_link);
			break;
		case LPFC_TRAILER_CODE_FCOE:
			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
			break;
		case LPFC_TRAILER_CODE_DCBX:
			lpfc_sli4_async_dcbx_evt(phba,
						 &cq_event->cqe.acqe_dcbx);
			break;
		case LPFC_TRAILER_CODE_GRP5:
			lpfc_sli4_async_grp5_evt(phba,
						 &cq_event->cqe.acqe_grp5);
			break;
		case LPFC_TRAILER_CODE_FC:
			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
			break;
		case LPFC_TRAILER_CODE_SLI:
			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"1804 Invalid asynchronous event code: "
					"x%x\n", bf_get(lpfc_trailer_code,
					&cq_event->cqe.mcqe_cmpl));
			break;
		}
		/* Return the processed completion event to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}

/**
 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process the FCF table
 * rediscovery pending completion event.
 **/
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
{
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* Clear FCF rediscovery timeout event */
	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
	/* Clear driver fast failover FCF record flag */
	phba->fcf.failover_rec.flag = 0;
	/* Set state for FCF fast failover */
	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
	spin_unlock_irq(&phba->hbalock);

	/* Scan FCF table from the first entry to re-discover SAN */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
			"2777 Start post-quiescent FCF table scan\n");
	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2747 Issue FCF scan read FCF mailbox "
				"command failed 0x%x\n", rc);
}

/**
 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
 * @phba: pointer to lpfc hba data structure.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine is invoked to set up the per HBA PCI-Device group function
 * API jump table entries.
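 *
 * The INIT, SCSI, SLI and MBOX jump tables are set up in turn; a failure
 * in any one of them fails the whole setup.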
 *
 * Return: 0 if success, otherwise -ENODEV
 **/
int
lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	int rc;

	/* Set up lpfc PCI-device group */
	phba->pci_dev_grp = dev_grp;

	/* The LPFC_PCI_DEV_OC uses SLI4 */
	if (dev_grp == LPFC_PCI_DEV_OC)
		phba->sli_rev = LPFC_SLI_REV4;

	/* Set up device INIT API function jump table */
	rc = lpfc_init_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SCSI API function jump table */
	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SLI API function jump table */
	rc = lpfc_sli_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up MBOX API function jump table */
	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;

	return 0;
}

/**
 * lpfc_log_intr_mode - Log the active interrupt mode
 * @phba: pointer to lpfc hba data structure.
 * @intr_mode: active interrupt mode adopted.
 *
 * This routine is invoked to log the currently active interrupt mode of
 * the device.
 **/
static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
{
	switch (intr_mode) {
	case 0:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0470 Enabled INTx interrupt mode.\n");
		break;
	case 1:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0481 Enabled MSI interrupt mode.\n");
		break;
	case 2:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0480 Enabled MSI-X interrupt mode.\n");
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0482 Illegal interrupt mode.\n");
		break;
	}
	return;
}

/**
 * lpfc_enable_pci_dev - Enable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the PCI device that is common to all
 * PCI devices.
 *
 * Return codes
 * 0 - successful
 * other values - error
 **/
static int
lpfc_enable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	int bars = 0;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		goto out_error;
	else
		pdev = phba->pcidev;
	/* Select PCI BARs */
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	/* Enable PCI device */
	if (pci_enable_device_mem(pdev))
		goto out_error;
	/* Request PCI resource for the device */
	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
		goto out_disable_device;
	/* Set up device as PCI master and save state for EEH */
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	pci_save_state(pdev);

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	if (pci_is_pcie(pdev))
		pdev->needs_freset = 1;

	return 0;

out_disable_device:
	pci_disable_device(pdev);
out_error:
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1401 Failed to enable pci device, bars:x%x\n", bars);
	return -ENODEV;
}

/**
 * lpfc_disable_pci_dev - Disable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the PCI device that is common to all
 * PCI devices.
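 *
 * It releases the memory BARs selected at enable time and then disables
 * the PCI device itself.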
 **/
static void
lpfc_disable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	int bars;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;
	/* Select PCI BARs */
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	/* Release PCI resource and disable PCI device */
	pci_release_selected_regions(pdev, bars);
	pci_disable_device(pdev);

	return;
}

/**
 * lpfc_reset_hba - Reset a hba
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to reset a hba device. It brings the HBA
 * offline, performs a board restart, and then brings the board back
 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
 * outstanding mailbox commands.
 **/
void
lpfc_reset_hba(struct lpfc_hba *phba)
{
	/* If resets are disabled then set error state and return. */
	if (!phba->cfg_enable_hba_reset) {
		phba->link_state = LPFC_HBA_ERROR;
		return;
	}
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads the PCI SR-IOV extended capability of the attached
 * device to obtain the total number of virtual functions the physical
 * function supports.
 *
 * Return: the number of supported virtual functions, or 0 when the device
 * has no SR-IOV capability.
 **/
uint16_t
lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t nr_virtfn;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos == 0)
		return 0;

	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
	return nr_virtfn;
}

/**
 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 * @nr_vfn: number of virtual functions to be enabled.
 *
 * This function enables the PCI SR-IOV virtual functions to a physical
 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
 * enable the number of virtual functions to the physical function. As
 * not all devices support SR-IOV, the return code from the
 * pci_enable_sriov() API call is not considered an error condition for
 * most devices.
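 *
 * Return: 0 on success, -EINVAL when @nr_vfn exceeds the device's SR-IOV
 * capability, otherwise the pci_enable_sriov() return code.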
 **/
int
lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t max_nr_vfn;
	int rc;

	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
	if (nr_vfn > max_nr_vfn) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3057 Requested vfs (%d) greater than "
				"supported vfs (%d)\n", nr_vfn, max_nr_vfn);
		return -EINVAL;
	}

	rc = pci_enable_sriov(pdev, nr_vfn);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2806 Failed to enable sriov on this device "
				"with vfn number nr_vf:%d, rc:%d\n",
				nr_vfn, rc);
	} else
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2807 Successfully enabled sriov on this "
				"device with vfn number nr_vf:%d\n", nr_vfn);
	return rc;
}

/**
 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-3 HBA device it is attached to.
 *
 * Return codes
 * 0 - successful
 * other values - error
 **/
static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	int rc;

	/*
	 * Initialize timers used by driver
	 */

	/* Heartbeat timer */
	init_timer(&phba->hb_tmofunc);
	phba->hb_tmofunc.function = lpfc_hb_timeout;
	phba->hb_tmofunc.data = (unsigned long)phba;

	psli = &phba->sli;
	/* MBOX heartbeat timer */
	init_timer(&psli->mbox_tmo);
	psli->mbox_tmo.function = lpfc_mbox_timeout;
	psli->mbox_tmo.data = (unsigned long) phba;
	/* FCP polling mode timer */
	init_timer(&phba->fcp_poll_timer);
	phba->fcp_poll_timer.function = lpfc_poll_timeout;
	phba->fcp_poll_timer.data = (unsigned long) phba;
	/* Fabric block timer */
	init_timer(&phba->fabric_block_timer);
	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
	phba->fabric_block_timer.data = (unsigned long) phba;
	/* EA polling mode timer */
	init_timer(&phba->eratt_poll);
	phba->eratt_poll.function = lpfc_poll_eratt;
	phba->eratt_poll.data = (unsigned long) phba;

	/* Host attention work mask setup */
	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);
	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
		phba->menlo_flag |= HBA_MENLO_SUPPORT;
		/* check for menlo minimum sg count */
		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
	}

	if (!phba->sli.ring)
		phba->sli.ring = (struct lpfc_sli_ring *)
			kzalloc(LPFC_SLI3_MAX_RING *
			sizeof(struct lpfc_sli_ring), GFP_KERNEL);
	if (!phba->sli.ring)
		return -ENOMEM;

	/*
	 * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 */

	/* Initialize the host templates with the configured values. */
	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;

	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
	if (phba->cfg_enable_bg) {
		/*
		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
		 * the FCP rsp, and a BDE for each. Since we have no control
		 * over how many protection data segments the SCSI Layer
		 * will hand us (i.e., there could be one for every block
		 * in the IO), we just allocate enough BDEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt to
		 * minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			(LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));

		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;

		/* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
	} else {
		/*
		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
		 * the FCP rsp, a BDE for each, and a BDE for up to
		 * cfg_sg_seg_cnt data segments.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));

		/* Total BDEs in BPL for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt);

	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_setup(phba);

	/* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						    phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2808 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;
}

/**
 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-3 HBA device it is attached to.
 **/
static void
lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
{
	/* Free device driver memory allocated */
	lpfc_mem_free_all(phba);

	return;
}

/**
 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-4 HBA device it is attached to.
 *
 * Return codes
 * 0 - successful
 * other values - error
 **/
static int
lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
{
	struct lpfc_vector_map_info *cpup;
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *mboxq;
	int rc, i, hbq_count, max_buf_size;
	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
	struct lpfc_mqe *mqe;
	int longs;
	int fof_vectors = 0;

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);

	/* Before proceeding, wait for POST done and device ready */
	rc = lpfc_sli4_post_status_check(phba);
	if (rc)
		return -ENODEV;

	/*
	 * Initialize timers used by driver
	 */

	/* Heartbeat timer */
	init_timer(&phba->hb_tmofunc);
	phba->hb_tmofunc.function = lpfc_hb_timeout;
	phba->hb_tmofunc.data = (unsigned long)phba;
	init_timer(&phba->rrq_tmr);
	phba->rrq_tmr.function = lpfc_rrq_timeout;
	phba->rrq_tmr.data = (unsigned long)phba;

	psli = &phba->sli;
	/* MBOX heartbeat timer */
	init_timer(&psli->mbox_tmo);
	psli->mbox_tmo.function = lpfc_mbox_timeout;
	psli->mbox_tmo.data = (unsigned long) phba;
	/* Fabric block timer */
	init_timer(&phba->fabric_block_timer);
	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
	phba->fabric_block_timer.data = (unsigned long) phba;
	/* EA polling mode timer */
	init_timer(&phba->eratt_poll);
	phba->eratt_poll.function = lpfc_poll_eratt;
	phba->eratt_poll.data = (unsigned long) phba;
	/* FCF rediscover timer */
	init_timer(&phba->fcf.redisc_wait);
	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
	phba->fcf.redisc_wait.data = (unsigned long)phba;

	/*
	 * Control structure for handling external multi-buffer mailbox
	 * command pass-through.
	 */
	memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
		sizeof(struct lpfc_mbox_ext_buf_ctx));
	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	phba->max_vpi = LPFC_MAX_VPI;

	/* This will be set to correct value after the read_config mbox */
	phba->max_vports = 0;

	/* Program the default value of vlan_id and fc_map */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

	/*
	 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
	 * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
	 */
	if (!phba->sli.ring)
		phba->sli.ring = kzalloc(
			(LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) *
			sizeof(struct lpfc_sli_ring), GFP_KERNEL);
	if (!phba->sli.ring)
		return -ENOMEM;

	/*
	 * It doesn't matter what family our adapter is in, we are
	 * limited to 2 Pages, 512 SGEs, for our SGL.
	 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
	 */
	max_buf_size = (2 * SLI4_PAGE_SIZE);
	if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
		phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;

	/*
	 * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 */

	if (phba->cfg_enable_bg) {
		/*
		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
		 * the FCP rsp, and a SGE for each.
		 * Since we have no control
		 * over how many protection data segments the SCSI Layer
		 * will hand us (i.e., there could be one for every block
		 * in the IO), we just allocate enough SGEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt to
		 * minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) + max_buf_size;

		/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;

		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
	} else {
		/*
		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
		 * the FCP rsp, a SGE for each, and a SGE for up to
		 * cfg_sg_seg_cnt data segments.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));

		/* Total SGEs for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
		/*
		 * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need
		 * to post 1 page for the SGL.
		 */
	}

	/* Initialize the host templates with the updated values. */
	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;

	if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
		phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
	else
		phba->cfg_sg_dma_buf_size =
			SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt);

	/* Initialize buffer queue management fields */
	hbq_count = lpfc_sli_hbq_count();
	for (i = 0; i < hbq_count; ++i)
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
	INIT_LIST_HEAD(&phba->rb_pend_list);
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;

	/*
	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
	 */
	/* Initialize the Abort scsi buffer list used by driver */
	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
	/* This abort list used by worker thread */
	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);

	/*
	 * Initialize driver internal slow-path work queues
	 */

	/* Driver internal slow-path CQ Event pool */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
	/* Response IOCB work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
	/* Asynchronous event CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
	/* Fast-path XRI aborted CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
	/* Slow-path XRI aborted CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
	/* Receive queue CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);

	/* Initialize extent block lists.
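	 * These lists track the rpi, xri, vfi and vpi resource extent
	 * blocks allocated from the port.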
	 */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
	INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);

	/* Initialize the driver internal SLI layer lists. */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_setup(phba);

	/* Allocate device driver memory */
	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
	if (rc)
		return -ENOMEM;

	/* IF Type 2 ports get initialized now. */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_2) {
		rc = lpfc_pci_function_reset(phba);
		if (unlikely(rc))
			return -ENODEV;
	}

	/* Create the bootstrap mailbox command */
	rc = lpfc_create_bootstrap_mbox(phba);
	if (unlikely(rc))
		goto out_free_mem;

	/* Set up the host's endian order with the device. */
	rc = lpfc_setup_endian_order(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* Set up the hba's configuration parameters. */
	rc = lpfc_sli4_read_config(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;
	rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* IF Type 0 ports get initialized now. */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_0) {
		rc = lpfc_pci_function_reset(phba);
		if (unlikely(rc))
			goto out_free_bsmbx;
	}

	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto out_free_bsmbx;
	}

	/* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
	lpfc_supported_pages(mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (!rc) {
		mqe = &mboxq->u.mqe;
		memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
		       LPFC_MAX_SUPPORTED_PAGES);
		for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
			switch (pn_page[i]) {
			case LPFC_SLI4_PARAMETERS:
				phba->sli4_hba.pc_sli4_params.supported = 1;
				break;
			default:
				break;
			}
		}
		/* Read the port's SLI4 Parameters capabilities if supported. */
		if (phba->sli4_hba.pc_sli4_params.supported)
			rc = lpfc_pc_sli4_params_get(phba, mboxq);
		if (rc) {
			mempool_free(mboxq, phba->mbox_mem_pool);
			rc = -EIO;
			goto out_free_bsmbx;
		}
	}
	/*
	 * Get sli4 parameters that override parameters from Port capabilities.
	 * If this call fails, it isn't critical unless the SLI4 parameters come
	 * back in conflict.
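	 * Extents together with RPI headers in use is the conflicting
	 * combination that is treated as fatal below.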
	 */
	rc = lpfc_get_sli4_parameters(phba, mboxq);
	if (rc) {
		if (phba->sli4_hba.extents_in_use &&
		    phba->sli4_hba.rpi_hdrs_in_use) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2999 Unsupported SLI4 Parameters "
				"Extents and RPI headers enabled.\n");
			/* Free the mailbox before bailing out */
			mempool_free(mboxq, phba->mbox_mem_pool);
			goto out_free_bsmbx;
		}
	}
	mempool_free(mboxq, phba->mbox_mem_pool);

	/* Verify OAS is supported */
	lpfc_sli4_oas_verify(phba);
	if (phba->cfg_fof)
		fof_vectors = 1;

	/* Verify all the SLI4 queues */
	rc = lpfc_sli4_queue_verify(phba);
	if (rc)
		goto out_free_bsmbx;

	/* Create driver internal CQE event pool */
	rc = lpfc_sli4_cq_event_pool_create(phba);
	if (rc)
		goto out_free_bsmbx;

	/* Initialize sgl lists per host */
	lpfc_init_sgl_list(phba);

	/* Allocate and initialize active sgl array */
	rc = lpfc_init_active_sgl_array(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1430 Failed to initialize sgl list.\n");
		goto out_destroy_cq_event_pool;
	}
	rc = lpfc_sli4_init_rpi_hdrs(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1432 Failed to initialize rpi headers.\n");
		goto out_free_active_sgl;
	}

	/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
					 GFP_KERNEL);
	if (!phba->fcf.fcf_rr_bmask) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2759 Failed to allocate memory for FCF round "
				"robin failover bmask\n");
		rc = -ENOMEM;
		goto out_remove_rpi_hdrs;
	}

	phba->sli4_hba.fcp_eq_hdl =
			kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
			    (fof_vectors + phba->cfg_fcp_io_channel)),
			    GFP_KERNEL);
	if (!phba->sli4_hba.fcp_eq_hdl) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2572 Failed to allocate memory for "
				"fast-path per-EQ handle array\n");
		rc = -ENOMEM;
		goto out_free_fcf_rr_bmask;
	}

	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
				      (fof_vectors +
				       phba->cfg_fcp_io_channel)), GFP_KERNEL);
	if (!phba->sli4_hba.msix_entries) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2573 Failed to allocate memory for msi-x "
				"interrupt vector entries\n");
		rc = -ENOMEM;
		goto out_free_fcp_eq_hdl;
	}

	phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) *
					 phba->sli4_hba.num_present_cpu),
					 GFP_KERNEL);
	if (!phba->sli4_hba.cpu_map) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3327 Failed to allocate memory for msi-x "
				"interrupt vector mapping\n");
		rc = -ENOMEM;
		goto out_free_msix;
	}
	if (lpfc_used_cpu == NULL) {
		lpfc_used_cpu = kzalloc((sizeof(uint16_t) * lpfc_present_cpu),
					GFP_KERNEL);
		if (!lpfc_used_cpu) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3335 Failed to allocate memory for msi-x "
					"interrupt vector mapping\n");
			kfree(phba->sli4_hba.cpu_map);
			rc = -ENOMEM;
			goto out_free_msix;
		}
		for (i = 0; i < lpfc_present_cpu; i++)
			lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY;
	}

	/* Initialize io channels for round robin */
	cpup = phba->sli4_hba.cpu_map;
	rc = 0;
	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
		cpup->channel_id = rc;
		/* Advance to the next CPU's map entry */
		cpup++;
		rc++;
		if (rc >= phba->cfg_fcp_io_channel)
			rc = 0;
	}

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						    phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"3020 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;

out_free_msix:
	kfree(phba->sli4_hba.msix_entries);
out_free_fcp_eq_hdl:
	kfree(phba->sli4_hba.fcp_eq_hdl);
out_free_fcf_rr_bmask:
	kfree(phba->fcf.fcf_rr_bmask);
out_remove_rpi_hdrs:
	lpfc_sli4_remove_rpi_hdrs(phba);
out_free_active_sgl:
	lpfc_free_active_sgl(phba);
out_destroy_cq_event_pool:
	lpfc_sli4_cq_event_pool_destroy(phba);
out_free_bsmbx:
	lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
	lpfc_mem_free(phba);
	return rc;
}

/**
 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-4 HBA device it is attached to.
 **/
static void
lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;

	/* Free memory allocated for msi-x interrupt vector to CPU mapping */
	kfree(phba->sli4_hba.cpu_map);
	phba->sli4_hba.num_present_cpu = 0;
	phba->sli4_hba.num_online_cpu = 0;
	phba->sli4_hba.curr_disp_cpu = 0;

	/* Free memory allocated for msi-x interrupt vector entries */
	kfree(phba->sli4_hba.msix_entries);

	/* Free memory allocated for fast-path work queue handles */
	kfree(phba->sli4_hba.fcp_eq_hdl);

	/* Free the allocated rpi headers. */
	lpfc_sli4_remove_rpi_hdrs(phba);
	lpfc_sli4_remove_rpis(phba);

	/* Free eligible FCF index bmask */
	kfree(phba->fcf.fcf_rr_bmask);

	/* Free the ELS sgl list */
	lpfc_free_active_sgl(phba);
	lpfc_free_els_sgl_list(phba);

	/* Free the completion queue EQ event pool */
	lpfc_sli4_cq_event_release_all(phba);
	lpfc_sli4_cq_event_pool_destroy(phba);

	/* Release resource identifiers. */
	lpfc_sli4_dealloc_resource_identifiers(phba);

	/* Free the bsmbx region. */
	lpfc_destroy_bootstrap_mbox(phba);

	/* Free the SLI Layer memory with SLI4 HBAs */
	lpfc_mem_free_all(phba);

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
				 &phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	return;
}

/**
 * lpfc_init_api_table_setup - Set up init api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the device INIT interface API function jump table
 * in @phba struct.
 *
 * Returns: 0 - success, -ENODEV - failure.
5345 **/
5346 int
5347 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5348 {
5349 phba->lpfc_hba_init_link = lpfc_hba_init_link;
5350 phba->lpfc_hba_down_link = lpfc_hba_down_link;
5351 phba->lpfc_selective_reset = lpfc_selective_reset;
5352 switch (dev_grp) {
5353 case LPFC_PCI_DEV_LP:
5354 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
5355 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
5356 phba->lpfc_stop_port = lpfc_stop_port_s3;
5357 break;
5358 case LPFC_PCI_DEV_OC:
5359 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
5360 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
5361 phba->lpfc_stop_port = lpfc_stop_port_s4;
5362 break;
5363 default:
5364 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5365 "1431 Invalid HBA PCI-device group: 0x%x\n",
5366 dev_grp);
5367 return -ENODEV;
5369 }
5370 return 0;
5371 }
5372
5373 /**
5374 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
5375 * @phba: pointer to lpfc hba data structure.
5376 *
5377 * This routine is invoked to set up the driver internal resources before the
5378 * device specific resource setup to support the HBA device it attached to.
5379 *
5380 * Return codes
5381 * 0 - successful
5382 * other values - error
5383 **/
5384 static int
5385 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
5386 {
5387 /*
5388 * Driver resources common to all SLI revisions
5389 */
5390 atomic_set(&phba->fast_event_count, 0);
5391 spin_lock_init(&phba->hbalock);
5392
5393 /* Initialize ndlp management spinlock */
5394 spin_lock_init(&phba->ndlp_lock);
5395
5396 INIT_LIST_HEAD(&phba->port_list);
5397 INIT_LIST_HEAD(&phba->work_list);
5398 init_waitqueue_head(&phba->wait_4_mlo_m_q);
5399
5400 /* Initialize the wait queue head for the kernel thread */
5401 init_waitqueue_head(&phba->work_waitq);
5402
5403 /* Initialize the scsi buffer list used by driver for scsi IO */
5404 spin_lock_init(&phba->scsi_buf_list_get_lock);
5405 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
5406 spin_lock_init(&phba->scsi_buf_list_put_lock);
5407 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
5408
5409 /* Initialize the fabric iocb list */
5410 INIT_LIST_HEAD(&phba->fabric_iocb_list);
5411
5412 /* Initialize list to save ELS buffers */
5413 INIT_LIST_HEAD(&phba->elsbuf);
5414
5415 /* Initialize FCF connection rec list */
5416 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
5417
5418 /* Initialize OAS configuration list */
5419 spin_lock_init(&phba->devicelock);
5420 INIT_LIST_HEAD(&phba->luns);
5421
5422 return 0;
5423 }
5424
5425 /**
5426 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
5427 * @phba: pointer to lpfc hba data structure.
5428 *
5429 * This routine is invoked to set up the driver internal resources after the
5430 * device specific resource setup to support the HBA device it attached to.
5431 *
5432 * Return codes
5433 * 0 - successful
5434 * other values - error
5435 **/
5436 static int
5437 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
5438 {
5439 int error;
5440
5441 /* Startup the kernel thread for this host adapter. */
5442 phba->worker_thread = kthread_run(lpfc_do_work, phba,
5443 "lpfc_worker_%d", phba->brd_no);
5444 if (IS_ERR(phba->worker_thread)) {
5445 error = PTR_ERR(phba->worker_thread);
5446 return error;
5447 }
5448
5449 return 0;
5450 }
5451
5452 /**
5453 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
5454 * @phba: pointer to lpfc hba data structure.
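 *
 * Note that kthread_stop() used below does not kill the worker outright; it
 * sets the should-stop flag, wakes the task, and blocks until the thread
 * function returns, so lpfc_do_work() is expected to check
 * kthread_should_stop() in its main loop.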
5455 *
5456 * This routine is invoked to unset the driver internal resources set up after
5457 * the device specific resource setup for supporting the HBA device it
5458 * attached to.
5459 **/
5460 static void
5461 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
5462 {
5463 /* Stop kernel worker thread */
5464 kthread_stop(phba->worker_thread);
5465 }
5466
5467 /**
5468 * lpfc_free_iocb_list - Free iocb list.
5469 * @phba: pointer to lpfc hba data structure.
5470 *
5471 * This routine is invoked to free the driver's IOCB list and memory.
5472 **/
5473 static void
5474 lpfc_free_iocb_list(struct lpfc_hba *phba)
5475 {
5476 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
5477
5478 spin_lock_irq(&phba->hbalock);
5479 list_for_each_entry_safe(iocbq_entry, iocbq_next,
5480 &phba->lpfc_iocb_list, list) {
5481 list_del(&iocbq_entry->list);
5482 kfree(iocbq_entry);
5483 phba->total_iocbq_bufs--;
5484 }
5485 spin_unlock_irq(&phba->hbalock);
5486
5487 return;
5488 }
5489
5490 /**
5491 * lpfc_init_iocb_list - Allocate and initialize iocb list.
5492 * @phba: pointer to lpfc hba data structure.
5493 *
5494 * This routine is invoked to allocate and initialize the driver's IOCB
5495 * list and set up the IOCB tag array accordingly.
5496 *
5497 * Return codes
5498 * 0 - successful
5499 * other values - error
5500 **/
5501 static int
5502 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
5503 {
5504 struct lpfc_iocbq *iocbq_entry = NULL;
5505 uint16_t iotag;
5506 int i;
5507
5508 /* Initialize and populate the iocb list per host. */
5509 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
5510 for (i = 0; i < iocb_count; i++) {
5511 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
5512 if (iocbq_entry == NULL) {
5513 printk(KERN_ERR "%s: only allocated %d iocbs of "
5514 "expected %d count. Unloading driver.\n",
5515 __func__, i, iocb_count);
5516 goto out_free_iocbq;
5517 }
5518
5519 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
5520 if (iotag == 0) {
5521 kfree(iocbq_entry);
5522 printk(KERN_ERR "%s: failed to allocate IOTAG. "
5523 "Unloading driver.\n", __func__);
5524 goto out_free_iocbq;
5525 }
5526 iocbq_entry->sli4_lxritag = NO_XRI;
5527 iocbq_entry->sli4_xritag = NO_XRI;
5528
5529 spin_lock_irq(&phba->hbalock);
5530 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
5531 phba->total_iocbq_bufs++;
5532 spin_unlock_irq(&phba->hbalock);
5533 }
5534
5535 return 0;
5536
5537 out_free_iocbq:
5538 lpfc_free_iocb_list(phba);
5539
5540 return -ENOMEM;
5541 }
5542
5543 /**
5544 * lpfc_free_sgl_list - Free a given sgl list.
5545 * @phba: pointer to lpfc hba data structure.
5546 * @sglq_list: pointer to the head of sgl list.
5547 *
5548 * This routine is invoked to free a given sgl list and memory.
5549 **/
5550 void
5551 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
5552 {
5553 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
5554
5555 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
5556 list_del(&sglq_entry->list);
5557 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
5558 kfree(sglq_entry);
5559 }
5560 }
5561
5562 /**
5563 * lpfc_free_els_sgl_list - Free els sgl list.
5564 * @phba: pointer to lpfc hba data structure.
5565 *
5566 * This routine is invoked to free the driver's els sgl list and memory.
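 *
 * The list is first spliced onto a local head under hbalock, so the
 * lpfc_mbuf_free()/kfree() calls in lpfc_free_sgl_list() run outside the
 * critical section and the lock hold time stays short.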
5567 **/
5568 static void
5569 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
5570 {
5571 LIST_HEAD(sglq_list);
5572
5573 /* Retrieve all els sgls from driver list */
5574 spin_lock_irq(&phba->hbalock);
5575 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
5576 spin_unlock_irq(&phba->hbalock);
5577
5578 /* Now free the sgl list */
5579 lpfc_free_sgl_list(phba, &sglq_list);
5580 }
5581
5582 /**
5583 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
5584 * @phba: pointer to lpfc hba data structure.
5585 *
5586 * This routine is invoked to allocate the driver's active sgl memory.
5587 * This array will hold the sglq_entry's for active IOs.
5588 **/
5589 static int
5590 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
5591 {
5592 int size;
5593 size = sizeof(struct lpfc_sglq *);
5594 size *= phba->sli4_hba.max_cfg_param.max_xri;
5595
5596 phba->sli4_hba.lpfc_sglq_active_list =
5597 kzalloc(size, GFP_KERNEL);
5598 if (!phba->sli4_hba.lpfc_sglq_active_list)
5599 return -ENOMEM;
5600 return 0;
5601 }
5602
5603 /**
5604 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
5605 * @phba: pointer to lpfc hba data structure.
5606 *
5607 * This routine is invoked to walk through the array of active sglq entries
5608 * and free all of the resources.
5609 * This is just a place holder for now.
5610 **/
5611 static void
5612 lpfc_free_active_sgl(struct lpfc_hba *phba)
5613 {
5614 kfree(phba->sli4_hba.lpfc_sglq_active_list);
5615 }
5616
5617 /**
5618 * lpfc_init_sgl_list - Allocate and initialize sgl list.
5619 * @phba: pointer to lpfc hba data structure.
5620 *
5621 * This routine is invoked to allocate and initialize the driver's sgl
5622 * list and set up the sgl xritag array accordingly.
5623 *
5624 **/
5625 static void
5626 lpfc_init_sgl_list(struct lpfc_hba *phba)
5627 {
5628 /* Initialize and populate the sglq list per host/VF. */
5629 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
5630 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
5631
5632 /* els xri-sgl book keeping */
5633 phba->sli4_hba.els_xri_cnt = 0;
5634
5635 /* scsi xri-buffer book keeping */
5636 phba->sli4_hba.scsi_xri_cnt = 0;
5637 }
5638
5639 /**
5640 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
5641 * @phba: pointer to lpfc hba data structure.
5642 *
5643 * This routine is invoked to post rpi header templates to the
5644 * port for those SLI4 ports that do not support extents. This routine
5645 * posts a PAGE_SIZE memory region to the port to hold up to
5646 * PAGE_SIZE / 64 rpi context headers. This is an initialization routine
5647 * and should be called only when interrupts are disabled.
5648 *
5649 * Return codes
5650 * 0 - successful
5651 * -ERROR - otherwise.
5652 **/
5653 int
5654 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
5655 {
5656 int rc = 0;
5657 struct lpfc_rpi_hdr *rpi_hdr;
5658
5659 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
5660 if (!phba->sli4_hba.rpi_hdrs_in_use)
5661 return rc;
5662 if (phba->sli4_hba.extents_in_use)
5663 return -EIO;
5664
5665 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
5666 if (!rpi_hdr) {
5667 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5668 "0391 Error during rpi post operation\n");
5669 lpfc_sli4_remove_rpis(phba);
5670 rc = -ENODEV;
5671 }
5672
5673 return rc;
5674 }
5675
5676 /**
5677 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
5678 * @phba: pointer to lpfc hba data structure.
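 *
 * Sizing sketch (assuming 64-byte rpi context entries): one 4KB
 * LPFC_HDR_TEMPLATE_SIZE region holds 4096 / 64 = 64 headers, which is the
 * LPFC_RPI_HDR_COUNT batch carved out of the rpi space per posting.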
5679 * 5680 * This routine is invoked to allocate a single 4KB memory region to 5681 * support rpis and stores them in the phba. This single region 5682 * provides support for up to 64 rpis. The region is used globally 5683 * by the device. 5684 * 5685 * Returns: 5686 * A valid rpi hdr on success. 5687 * A NULL pointer on any failure. 5688 **/ 5689 struct lpfc_rpi_hdr * 5690 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 5691 { 5692 uint16_t rpi_limit, curr_rpi_range; 5693 struct lpfc_dmabuf *dmabuf; 5694 struct lpfc_rpi_hdr *rpi_hdr; 5695 uint32_t rpi_count; 5696 5697 /* 5698 * If the SLI4 port supports extents, posting the rpi header isn't 5699 * required. Set the expected maximum count and let the actual value 5700 * get set when extents are fully allocated. 5701 */ 5702 if (!phba->sli4_hba.rpi_hdrs_in_use) 5703 return NULL; 5704 if (phba->sli4_hba.extents_in_use) 5705 return NULL; 5706 5707 /* The limit on the logical index is just the max_rpi count. */ 5708 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + 5709 phba->sli4_hba.max_cfg_param.max_rpi - 1; 5710 5711 spin_lock_irq(&phba->hbalock); 5712 /* 5713 * Establish the starting RPI in this header block. The starting 5714 * rpi is normalized to a zero base because the physical rpi is 5715 * port based. 5716 */ 5717 curr_rpi_range = phba->sli4_hba.next_rpi; 5718 spin_unlock_irq(&phba->hbalock); 5719 5720 /* 5721 * The port has a limited number of rpis. The increment here 5722 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value 5723 * and to allow the full max_rpi range per port. 5724 */ 5725 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit) 5726 rpi_count = rpi_limit - curr_rpi_range; 5727 else 5728 rpi_count = LPFC_RPI_HDR_COUNT; 5729 5730 if (!rpi_count) 5731 return NULL; 5732 /* 5733 * First allocate the protocol header region for the port. The 5734 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 5735 */ 5736 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5737 if (!dmabuf) 5738 return NULL; 5739 5740 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 5741 LPFC_HDR_TEMPLATE_SIZE, 5742 &dmabuf->phys, 5743 GFP_KERNEL); 5744 if (!dmabuf->virt) { 5745 rpi_hdr = NULL; 5746 goto err_free_dmabuf; 5747 } 5748 5749 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE); 5750 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 5751 rpi_hdr = NULL; 5752 goto err_free_coherent; 5753 } 5754 5755 /* Save the rpi header data for cleanup later. */ 5756 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 5757 if (!rpi_hdr) 5758 goto err_free_coherent; 5759 5760 rpi_hdr->dmabuf = dmabuf; 5761 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 5762 rpi_hdr->page_count = 1; 5763 spin_lock_irq(&phba->hbalock); 5764 5765 /* The rpi_hdr stores the logical index only. */ 5766 rpi_hdr->start_rpi = curr_rpi_range; 5767 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 5768 5769 /* 5770 * The next_rpi stores the next logical module-64 rpi value used 5771 * to post physical rpis in subsequent rpi postings. 5772 */ 5773 phba->sli4_hba.next_rpi += rpi_count; 5774 spin_unlock_irq(&phba->hbalock); 5775 return rpi_hdr; 5776 5777 err_free_coherent: 5778 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 5779 dmabuf->virt, dmabuf->phys); 5780 err_free_dmabuf: 5781 kfree(dmabuf); 5782 return NULL; 5783 } 5784 5785 /** 5786 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 5787 * @phba: pointer to lpfc hba data structure. 
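 *
 * This undoes lpfc_sli4_create_rpi_hdr(): each queued header region is
 * returned via dma_free_coherent() and next_rpi is reset to zero.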
5788 * 5789 * This routine is invoked to remove all memory resources allocated 5790 * to support rpis for SLI4 ports not supporting extents. This routine 5791 * presumes the caller has released all rpis consumed by fabric or port 5792 * logins and is prepared to have the header pages removed. 5793 **/ 5794 void 5795 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 5796 { 5797 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 5798 5799 if (!phba->sli4_hba.rpi_hdrs_in_use) 5800 goto exit; 5801 5802 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 5803 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 5804 list_del(&rpi_hdr->list); 5805 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 5806 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 5807 kfree(rpi_hdr->dmabuf); 5808 kfree(rpi_hdr); 5809 } 5810 exit: 5811 /* There are no rpis available to the port now. */ 5812 phba->sli4_hba.next_rpi = 0; 5813 } 5814 5815 /** 5816 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 5817 * @pdev: pointer to pci device data structure. 5818 * 5819 * This routine is invoked to allocate the driver hba data structure for an 5820 * HBA device. If the allocation is successful, the phba reference to the 5821 * PCI device data structure is set. 5822 * 5823 * Return codes 5824 * pointer to @phba - successful 5825 * NULL - error 5826 **/ 5827 static struct lpfc_hba * 5828 lpfc_hba_alloc(struct pci_dev *pdev) 5829 { 5830 struct lpfc_hba *phba; 5831 5832 /* Allocate memory for HBA structure */ 5833 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 5834 if (!phba) { 5835 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 5836 return NULL; 5837 } 5838 5839 /* Set reference to PCI device in HBA structure */ 5840 phba->pcidev = pdev; 5841 5842 /* Assign an unused board number */ 5843 phba->brd_no = lpfc_get_instance(); 5844 if (phba->brd_no < 0) { 5845 kfree(phba); 5846 return NULL; 5847 } 5848 5849 spin_lock_init(&phba->ct_ev_lock); 5850 INIT_LIST_HEAD(&phba->ct_ev_waiters); 5851 5852 return phba; 5853 } 5854 5855 /** 5856 * lpfc_hba_free - Free driver hba data structure with a device. 5857 * @phba: pointer to lpfc hba data structure. 5858 * 5859 * This routine is invoked to free the driver hba data structure with an 5860 * HBA device. 5861 **/ 5862 static void 5863 lpfc_hba_free(struct lpfc_hba *phba) 5864 { 5865 /* Release the driver assigned board number */ 5866 idr_remove(&lpfc_hba_index, phba->brd_no); 5867 5868 /* Free memory allocated with sli rings */ 5869 kfree(phba->sli.ring); 5870 phba->sli.ring = NULL; 5871 5872 kfree(phba); 5873 return; 5874 } 5875 5876 /** 5877 * lpfc_create_shost - Create hba physical port with associated scsi host. 5878 * @phba: pointer to lpfc hba data structure. 5879 * 5880 * This routine is invoked to create HBA physical port and associate a SCSI 5881 * host with it. 
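 *
 * The shost pointer is stored as the PCI driver data, so later stages can
 * recover it with pci_get_drvdata(phba->pcidev), as lpfc_post_init_setup()
 * does further below.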
5882 * 5883 * Return codes 5884 * 0 - successful 5885 * other values - error 5886 **/ 5887 static int 5888 lpfc_create_shost(struct lpfc_hba *phba) 5889 { 5890 struct lpfc_vport *vport; 5891 struct Scsi_Host *shost; 5892 5893 /* Initialize HBA FC structure */ 5894 phba->fc_edtov = FF_DEF_EDTOV; 5895 phba->fc_ratov = FF_DEF_RATOV; 5896 phba->fc_altov = FF_DEF_ALTOV; 5897 phba->fc_arbtov = FF_DEF_ARBTOV; 5898 5899 atomic_set(&phba->sdev_cnt, 0); 5900 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 5901 if (!vport) 5902 return -ENODEV; 5903 5904 shost = lpfc_shost_from_vport(vport); 5905 phba->pport = vport; 5906 lpfc_debugfs_initialize(vport); 5907 /* Put reference to SCSI host to driver's device private data */ 5908 pci_set_drvdata(phba->pcidev, shost); 5909 5910 return 0; 5911 } 5912 5913 /** 5914 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 5915 * @phba: pointer to lpfc hba data structure. 5916 * 5917 * This routine is invoked to destroy HBA physical port and the associated 5918 * SCSI host. 5919 **/ 5920 static void 5921 lpfc_destroy_shost(struct lpfc_hba *phba) 5922 { 5923 struct lpfc_vport *vport = phba->pport; 5924 5925 /* Destroy physical port that associated with the SCSI host */ 5926 destroy_port(vport); 5927 5928 return; 5929 } 5930 5931 /** 5932 * lpfc_setup_bg - Setup Block guard structures and debug areas. 5933 * @phba: pointer to lpfc hba data structure. 5934 * @shost: the shost to be used to detect Block guard settings. 5935 * 5936 * This routine sets up the local Block guard protocol settings for @shost. 5937 * This routine also allocates memory for debugging bg buffers. 5938 **/ 5939 static void 5940 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 5941 { 5942 uint32_t old_mask; 5943 uint32_t old_guard; 5944 5945 int pagecnt = 10; 5946 if (lpfc_prot_mask && lpfc_prot_guard) { 5947 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5948 "1478 Registering BlockGuard with the " 5949 "SCSI layer\n"); 5950 5951 old_mask = lpfc_prot_mask; 5952 old_guard = lpfc_prot_guard; 5953 5954 /* Only allow supported values */ 5955 lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | 5956 SHOST_DIX_TYPE0_PROTECTION | 5957 SHOST_DIX_TYPE1_PROTECTION); 5958 lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC); 5959 5960 /* DIF Type 1 protection for profiles AST1/C1 is end to end */ 5961 if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION) 5962 lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; 5963 5964 if (lpfc_prot_mask && lpfc_prot_guard) { 5965 if ((old_mask != lpfc_prot_mask) || 5966 (old_guard != lpfc_prot_guard)) 5967 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5968 "1475 Registering BlockGuard with the " 5969 "SCSI layer: mask %d guard %d\n", 5970 lpfc_prot_mask, lpfc_prot_guard); 5971 5972 scsi_host_set_prot(shost, lpfc_prot_mask); 5973 scsi_host_set_guard(shost, lpfc_prot_guard); 5974 } else 5975 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5976 "1479 Not Registering BlockGuard with the SCSI " 5977 "layer, Bad protection parameters: %d %d\n", 5978 old_mask, old_guard); 5979 } 5980 5981 if (!_dump_buf_data) { 5982 while (pagecnt) { 5983 spin_lock_init(&_dump_buf_lock); 5984 _dump_buf_data = 5985 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 5986 if (_dump_buf_data) { 5987 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5988 "9043 BLKGRD: allocated %d pages for " 5989 "_dump_buf_data at 0x%p\n", 5990 (1 << pagecnt), _dump_buf_data); 5991 _dump_buf_data_order = pagecnt; 5992 memset(_dump_buf_data, 0, 5993 ((1 << PAGE_SHIFT) << pagecnt)); 5994 
break; 5995 } else 5996 --pagecnt; 5997 } 5998 if (!_dump_buf_data_order) 5999 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6000 "9044 BLKGRD: ERROR unable to allocate " 6001 "memory for hexdump\n"); 6002 } else 6003 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6004 "9045 BLKGRD: already allocated _dump_buf_data=0x%p" 6005 "\n", _dump_buf_data); 6006 if (!_dump_buf_dif) { 6007 while (pagecnt) { 6008 _dump_buf_dif = 6009 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 6010 if (_dump_buf_dif) { 6011 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6012 "9046 BLKGRD: allocated %d pages for " 6013 "_dump_buf_dif at 0x%p\n", 6014 (1 << pagecnt), _dump_buf_dif); 6015 _dump_buf_dif_order = pagecnt; 6016 memset(_dump_buf_dif, 0, 6017 ((1 << PAGE_SHIFT) << pagecnt)); 6018 break; 6019 } else 6020 --pagecnt; 6021 } 6022 if (!_dump_buf_dif_order) 6023 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6024 "9047 BLKGRD: ERROR unable to allocate " 6025 "memory for hexdump\n"); 6026 } else 6027 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6028 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", 6029 _dump_buf_dif); 6030 } 6031 6032 /** 6033 * lpfc_post_init_setup - Perform necessary device post initialization setup. 6034 * @phba: pointer to lpfc hba data structure. 6035 * 6036 * This routine is invoked to perform all the necessary post initialization 6037 * setup for the device. 6038 **/ 6039 static void 6040 lpfc_post_init_setup(struct lpfc_hba *phba) 6041 { 6042 struct Scsi_Host *shost; 6043 struct lpfc_adapter_event_header adapter_event; 6044 6045 /* Get the default values for Model Name and Description */ 6046 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 6047 6048 /* 6049 * hba setup may have changed the hba_queue_depth so we need to 6050 * adjust the value of can_queue. 6051 */ 6052 shost = pci_get_drvdata(phba->pcidev); 6053 shost->can_queue = phba->cfg_hba_queue_depth - 10; 6054 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 6055 lpfc_setup_bg(phba, shost); 6056 6057 lpfc_host_attrib_init(shost); 6058 6059 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 6060 spin_lock_irq(shost->host_lock); 6061 lpfc_poll_start_timer(phba); 6062 spin_unlock_irq(shost->host_lock); 6063 } 6064 6065 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6066 "0428 Perform SCSI scan\n"); 6067 /* Send board arrival event to upper layer */ 6068 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 6069 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 6070 fc_host_post_vendor_event(shost, fc_get_event_number(), 6071 sizeof(adapter_event), 6072 (char *) &adapter_event, 6073 LPFC_NL_VENDOR_ID); 6074 return; 6075 } 6076 6077 /** 6078 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 6079 * @phba: pointer to lpfc hba data structure. 6080 * 6081 * This routine is invoked to set up the PCI device memory space for device 6082 * with SLI-3 interface spec. 
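 *
 * On these SLI-3 parts BAR0 carries the SLIM and BAR2 the host control
 * registers (HA/CA/HS/HC); both windows are ioremap()ed below, and the DMA
 * mask is tried at 64 bits first with a 32-bit fallback.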
6083 * 6084 * Return codes 6085 * 0 - successful 6086 * other values - error 6087 **/ 6088 static int 6089 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 6090 { 6091 struct pci_dev *pdev; 6092 unsigned long bar0map_len, bar2map_len; 6093 int i, hbq_count; 6094 void *ptr; 6095 int error = -ENODEV; 6096 6097 /* Obtain PCI device reference */ 6098 if (!phba->pcidev) 6099 return error; 6100 else 6101 pdev = phba->pcidev; 6102 6103 /* Set the device DMA mask size */ 6104 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 6105 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 6106 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 6107 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 6108 return error; 6109 } 6110 } 6111 6112 /* Get the bus address of Bar0 and Bar2 and the number of bytes 6113 * required by each mapping. 6114 */ 6115 phba->pci_bar0_map = pci_resource_start(pdev, 0); 6116 bar0map_len = pci_resource_len(pdev, 0); 6117 6118 phba->pci_bar2_map = pci_resource_start(pdev, 2); 6119 bar2map_len = pci_resource_len(pdev, 2); 6120 6121 /* Map HBA SLIM to a kernel virtual address. */ 6122 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 6123 if (!phba->slim_memmap_p) { 6124 dev_printk(KERN_ERR, &pdev->dev, 6125 "ioremap failed for SLIM memory.\n"); 6126 goto out; 6127 } 6128 6129 /* Map HBA Control Registers to a kernel virtual address. */ 6130 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 6131 if (!phba->ctrl_regs_memmap_p) { 6132 dev_printk(KERN_ERR, &pdev->dev, 6133 "ioremap failed for HBA control registers.\n"); 6134 goto out_iounmap_slim; 6135 } 6136 6137 /* Allocate memory for SLI-2 structures */ 6138 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, 6139 SLI2_SLIM_SIZE, 6140 &phba->slim2p.phys, 6141 GFP_KERNEL); 6142 if (!phba->slim2p.virt) 6143 goto out_iounmap; 6144 6145 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); 6146 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 6147 phba->mbox_ext = (phba->slim2p.virt + 6148 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 6149 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 6150 phba->IOCBs = (phba->slim2p.virt + 6151 offsetof(struct lpfc_sli2_slim, IOCBs)); 6152 6153 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 6154 lpfc_sli_hbq_size(), 6155 &phba->hbqslimp.phys, 6156 GFP_KERNEL); 6157 if (!phba->hbqslimp.virt) 6158 goto out_free_slim; 6159 6160 hbq_count = lpfc_sli_hbq_count(); 6161 ptr = phba->hbqslimp.virt; 6162 for (i = 0; i < hbq_count; ++i) { 6163 phba->hbqs[i].hbq_virt = ptr; 6164 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 6165 ptr += (lpfc_hbq_defs[i]->entry_count * 6166 sizeof(struct lpfc_hbq_entry)); 6167 } 6168 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 6169 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 6170 6171 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 6172 6173 INIT_LIST_HEAD(&phba->rb_pend_list); 6174 6175 phba->MBslimaddr = phba->slim_memmap_p; 6176 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 6177 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 6178 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 6179 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 6180 6181 return 0; 6182 6183 out_free_slim: 6184 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 6185 phba->slim2p.virt, phba->slim2p.phys); 6186 out_iounmap: 6187 iounmap(phba->ctrl_regs_memmap_p); 6188 out_iounmap_slim: 6189 iounmap(phba->slim_memmap_p); 6190 
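/* Error unwind: resources are released in the reverse order of their setup. */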
out:
6191 return error;
6192 }
6193
6194 /**
6195 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
6196 * @phba: pointer to lpfc hba data structure.
6197 *
6198 * This routine is invoked to unset the PCI device memory space for device
6199 * with SLI-3 interface spec.
6200 **/
6201 static void
6202 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
6203 {
6204 struct pci_dev *pdev;
6205
6206 /* Obtain PCI device reference */
6207 if (!phba->pcidev)
6208 return;
6209 else
6210 pdev = phba->pcidev;
6211
6212 /* Free coherent DMA memory allocated */
6213 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
6214 phba->hbqslimp.virt, phba->hbqslimp.phys);
6215 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6216 phba->slim2p.virt, phba->slim2p.phys);
6217
6218 /* I/O memory unmap */
6219 iounmap(phba->ctrl_regs_memmap_p);
6220 iounmap(phba->slim_memmap_p);
6221
6222 return;
6223 }
6224
6225 /**
6226 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
6227 * @phba: pointer to lpfc hba data structure.
6228 *
6229 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
6230 * done and check status.
6231 *
6232 * Return 0 if successful, otherwise -ENODEV.
6233 **/
6234 int
6235 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
6236 {
6237 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
6238 struct lpfc_register reg_data;
6239 int i, port_error = 0;
6240 uint32_t if_type;
6241
6242 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
6243 memset(&reg_data, 0, sizeof(reg_data));
6244 if (!phba->sli4_hba.PSMPHRregaddr)
6245 return -ENODEV;
6246
6247 /* Wait up to 30 seconds for the SLI Port POST done and ready */
6248 for (i = 0; i < 3000; i++) {
6249 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
6250 &portsmphr_reg.word0) ||
6251 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
6252 /* Port has a fatal POST error, break out */
6253 port_error = -ENODEV;
6254 break;
6255 }
6256 if (LPFC_POST_STAGE_PORT_READY ==
6257 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
6258 break;
6259 msleep(10);
6260 }
6261
6262 /*
6263 * If there was a port error during POST, then don't proceed with
6264 * other register reads as the data may not be valid. Just exit.
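 * (The wait loop above makes 3000 polls with msleep(10) between them;
 * msleep() sleeps at least the requested time, so 30 seconds is a floor
 * rather than an exact bound.)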
6265 */
6266 if (port_error) {
6267 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6268 "1408 Port Failed POST - portsmphr=0x%x, "
6269 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
6270 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
6271 portsmphr_reg.word0,
6272 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
6273 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
6274 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
6275 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
6276 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
6277 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
6278 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
6279 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
6280 } else {
6281 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6282 "2534 Device Info: SLIFamily=0x%x, "
6283 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
6284 "SLIHint_2=0x%x, FT=0x%x\n",
6285 bf_get(lpfc_sli_intf_sli_family,
6286 &phba->sli4_hba.sli_intf),
6287 bf_get(lpfc_sli_intf_slirev,
6288 &phba->sli4_hba.sli_intf),
6289 bf_get(lpfc_sli_intf_if_type,
6290 &phba->sli4_hba.sli_intf),
6291 bf_get(lpfc_sli_intf_sli_hint1,
6292 &phba->sli4_hba.sli_intf),
6293 bf_get(lpfc_sli_intf_sli_hint2,
6294 &phba->sli4_hba.sli_intf),
6295 bf_get(lpfc_sli_intf_func_type,
6296 &phba->sli4_hba.sli_intf));
6297 /*
6298 * Check for other Port errors during the initialization
6299 * process. Fail the load if the port did not come up
6300 * correctly.
6301 */
6302 if_type = bf_get(lpfc_sli_intf_if_type,
6303 &phba->sli4_hba.sli_intf);
6304 switch (if_type) {
6305 case LPFC_SLI_INTF_IF_TYPE_0:
6306 phba->sli4_hba.ue_mask_lo =
6307 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
6308 phba->sli4_hba.ue_mask_hi =
6309 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
6310 uerrlo_reg.word0 =
6311 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
6312 uerrhi_reg.word0 =
6313 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
6314 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
6315 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
6316 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6317 "1422 Unrecoverable Error "
6318 "Detected during POST "
6319 "uerr_lo_reg=0x%x, "
6320 "uerr_hi_reg=0x%x, "
6321 "ue_mask_lo_reg=0x%x, "
6322 "ue_mask_hi_reg=0x%x\n",
6323 uerrlo_reg.word0,
6324 uerrhi_reg.word0,
6325 phba->sli4_hba.ue_mask_lo,
6326 phba->sli4_hba.ue_mask_hi);
6327 port_error = -ENODEV;
6328 }
6329 break;
6330 case LPFC_SLI_INTF_IF_TYPE_2:
6331 /* Final checks. The port status should be clean. */
6332 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
6333 &reg_data.word0) ||
6334 (bf_get(lpfc_sliport_status_err, &reg_data) &&
6335 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
6336 phba->work_status[0] =
6337 readl(phba->sli4_hba.u.if_type2.
6338 ERR1regaddr);
6339 phba->work_status[1] =
6340 readl(phba->sli4_hba.u.if_type2.
6341 ERR2regaddr);
6342 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6343 "2888 Unrecoverable port error "
6344 "following POST: port status reg "
6345 "0x%x, port_smphr reg 0x%x, "
6346 "error 1=0x%x, error 2=0x%x\n",
6347 reg_data.word0,
6348 portsmphr_reg.word0,
6349 phba->work_status[0],
6350 phba->work_status[1]);
6351 port_error = -ENODEV;
6352 }
6353 break;
6354 case LPFC_SLI_INTF_IF_TYPE_1:
6355 default:
6356 break;
6357 }
6358 }
6359 return port_error;
6360 }
6361
6362 /**
6363 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
6364 * @phba: pointer to lpfc hba data structure.
6365 * @if_type: The SLI4 interface type getting configured.
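 *
 * Every address is a fixed offset into the mapped BAR0 window; for if_type
 * 2, for instance, the port status register polled by
 * lpfc_sli4_post_status_check() is conf_regs_memmap_p +
 * LPFC_CTL_PORT_STA_OFFSET.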
6366 *
6367 * This routine is invoked to set up SLI4 BAR0 PCI config space register
6368 * memory map.
6369 **/
6370 static void
6371 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
6372 {
6373 switch (if_type) {
6374 case LPFC_SLI_INTF_IF_TYPE_0:
6375 phba->sli4_hba.u.if_type0.UERRLOregaddr =
6376 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
6377 phba->sli4_hba.u.if_type0.UERRHIregaddr =
6378 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
6379 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
6380 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
6381 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
6382 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
6383 phba->sli4_hba.SLIINTFregaddr =
6384 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
6385 break;
6386 case LPFC_SLI_INTF_IF_TYPE_2:
6387 phba->sli4_hba.u.if_type2.ERR1regaddr =
6388 phba->sli4_hba.conf_regs_memmap_p +
6389 LPFC_CTL_PORT_ER1_OFFSET;
6390 phba->sli4_hba.u.if_type2.ERR2regaddr =
6391 phba->sli4_hba.conf_regs_memmap_p +
6392 LPFC_CTL_PORT_ER2_OFFSET;
6393 phba->sli4_hba.u.if_type2.CTRLregaddr =
6394 phba->sli4_hba.conf_regs_memmap_p +
6395 LPFC_CTL_PORT_CTL_OFFSET;
6396 phba->sli4_hba.u.if_type2.STATUSregaddr =
6397 phba->sli4_hba.conf_regs_memmap_p +
6398 LPFC_CTL_PORT_STA_OFFSET;
6399 phba->sli4_hba.SLIINTFregaddr =
6400 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
6401 phba->sli4_hba.PSMPHRregaddr =
6402 phba->sli4_hba.conf_regs_memmap_p +
6403 LPFC_CTL_PORT_SEM_OFFSET;
6404 phba->sli4_hba.RQDBregaddr =
6405 phba->sli4_hba.conf_regs_memmap_p +
6406 LPFC_ULP0_RQ_DOORBELL;
6407 phba->sli4_hba.WQDBregaddr =
6408 phba->sli4_hba.conf_regs_memmap_p +
6409 LPFC_ULP0_WQ_DOORBELL;
6410 phba->sli4_hba.EQCQDBregaddr =
6411 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
6412 phba->sli4_hba.MQDBregaddr =
6413 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
6414 phba->sli4_hba.BMBXregaddr =
6415 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
6416 break;
6417 case LPFC_SLI_INTF_IF_TYPE_1:
6418 default:
6419 dev_printk(KERN_ERR, &phba->pcidev->dev,
6420 "FATAL - unsupported SLI4 interface type - %d\n",
6421 if_type);
6422 break;
6423 }
6424 }
6425
6426 /**
6427 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
6428 * @phba: pointer to lpfc hba data structure.
6429 *
6430 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
6431 * memory map.
6432 **/
6433 static void
6434 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
6435 {
6436 phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6437 LPFC_SLIPORT_IF0_SMPHR;
6438 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6439 LPFC_HST_ISR0;
6440 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6441 LPFC_HST_IMR0;
6442 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6443 LPFC_HST_ISCR0;
6444 }
6445
6446 /**
6447 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
6448 * @phba: pointer to lpfc hba data structure.
6449 * @vf: virtual function number
6450 *
6451 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
6452 * based on the given virtual function number, @vf.
6453 *
6454 * Return 0 if successful, otherwise -ENODEV.
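 *
 * The doorbells are laid out one LPFC_VFR_PAGE_SIZE page per virtual
 * function, so each address below is drbl_regs_memmap_p +
 * vf * LPFC_VFR_PAGE_SIZE plus the per-doorbell offset.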
6455 **/
6456 static int
6457 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
6458 {
6459 if (vf > LPFC_VIR_FUNC_MAX)
6460 return -ENODEV;
6461
6462 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6463 vf * LPFC_VFR_PAGE_SIZE +
6464 LPFC_ULP0_RQ_DOORBELL);
6465 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6466 vf * LPFC_VFR_PAGE_SIZE +
6467 LPFC_ULP0_WQ_DOORBELL);
6468 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6469 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
6470 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6471 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
6472 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6473 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
6474 return 0;
6475 }
6476
6477 /**
6478 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
6479 * @phba: pointer to lpfc hba data structure.
6480 *
6481 * This routine is invoked to create the bootstrap mailbox
6482 * region consistent with the SLI-4 interface spec. This
6483 * routine allocates all memory necessary to communicate
6484 * mailbox commands to the port and sets up all alignment
6485 * needs. No locks are expected to be held when calling
6486 * this routine.
6487 *
6488 * Return codes
6489 * 0 - successful
6490 * -ENOMEM - could not allocate memory.
6491 **/
6492 static int
6493 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
6494 {
6495 uint32_t bmbx_size;
6496 struct lpfc_dmabuf *dmabuf;
6497 struct dma_address *dma_address;
6498 uint32_t pa_addr;
6499 uint64_t phys_addr;
6500
6501 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
6502 if (!dmabuf)
6503 return -ENOMEM;
6504
6505 /*
6506 * The bootstrap mailbox region is comprised of 2 parts
6507 * plus an alignment restriction of 16 bytes.
6508 */
6509 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
6510 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6511 bmbx_size,
6512 &dmabuf->phys,
6513 GFP_KERNEL);
6514 if (!dmabuf->virt) {
6515 kfree(dmabuf);
6516 return -ENOMEM;
6517 }
6518 memset(dmabuf->virt, 0, bmbx_size);
6519
6520 /*
6521 * Initialize the bootstrap mailbox pointers now so that the register
6522 * operations are simple later. The mailbox dma address is required
6523 * to be 16-byte aligned. Also align the virtual memory as each
6524 * mailbox is copied into the bmbx mailbox region before issuing the
6525 * command to the port.
6526 */
6527 phba->sli4_hba.bmbx.dmabuf = dmabuf;
6528 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
6529
6530 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
6531 LPFC_ALIGN_16_BYTE);
6532 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
6533 LPFC_ALIGN_16_BYTE);
6534
6535 /*
6536 * Set the high and low physical addresses now. The SLI4 alignment
6537 * requirement is 16 bytes and the mailbox is posted to the port
6538 * as two 30-bit addresses. The other data is a bit marking whether
6539 * the 30-bit address is the high or low address.
6540 * Upcast bmbx aphys to 64bits so shift instruction compiles
6541 * clean on 32 bit machines.
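 * In effect addr_lo carries physical address bits [33:4] and addr_hi bits
 * [63:34]; the bottom two bits of each word hold the LPFC_BMBX_BIT1_ADDR_*
 * marker telling the port which half of the address the word contains.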
6542 */
6543 dma_address = &phba->sli4_hba.bmbx.dma_address;
6544 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
6545 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
6546 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
6547 LPFC_BMBX_BIT1_ADDR_HI);
6548
6549 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
6550 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
6551 LPFC_BMBX_BIT1_ADDR_LO);
6552 return 0;
6553 }
6554
6555 /**
6556 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
6557 * @phba: pointer to lpfc hba data structure.
6558 *
6559 * This routine is invoked to tear down the bootstrap mailbox
6560 * region and release all host resources. This routine requires
6561 * the caller to ensure all mailbox commands have been recovered, no
6562 * additional mailbox commands are sent, and interrupts are disabled
6563 * before calling this routine.
6564 *
6565 **/
6566 static void
6567 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
6568 {
6569 dma_free_coherent(&phba->pcidev->dev,
6570 phba->sli4_hba.bmbx.bmbx_size,
6571 phba->sli4_hba.bmbx.dmabuf->virt,
6572 phba->sli4_hba.bmbx.dmabuf->phys);
6573
6574 kfree(phba->sli4_hba.bmbx.dmabuf);
6575 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
6576 }
6577
6578 /**
6579 * lpfc_sli4_read_config - Get the config parameters.
6580 * @phba: pointer to lpfc hba data structure.
6581 *
6582 * This routine is invoked to read the configuration parameters from the HBA.
6583 * The configuration parameters are used to set the base and maximum values
6584 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
6585 * allocation for the port.
6586 *
6587 * Return codes
6588 * 0 - successful
6589 * -ENOMEM - No available memory
6590 * -EIO - The mailbox failed to complete successfully.
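 *
 * Note: the VPI count reported by READ_CONFIG appears to cover the physical
 * port's own VPI as well, which is why max_vpi (and max_vports) is derived
 * below as max_cfg_param.max_vpi - 1.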
6591 **/
6592 int
6593 lpfc_sli4_read_config(struct lpfc_hba *phba)
6594 {
6595 LPFC_MBOXQ_t *pmb;
6596 struct lpfc_mbx_read_config *rd_config;
6597 union lpfc_sli4_cfg_shdr *shdr;
6598 uint32_t shdr_status, shdr_add_status;
6599 struct lpfc_mbx_get_func_cfg *get_func_cfg;
6600 struct lpfc_rsrc_desc_fcfcoe *desc;
6601 char *pdesc_0;
6602 uint32_t desc_count;
6603 int length, i, rc = 0, rc2;
6604
6605 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6606 if (!pmb) {
6607 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6608 "2011 Unable to allocate memory for issuing "
6609 "SLI_CONFIG_SPECIAL mailbox command\n");
6610 return -ENOMEM;
6611 }
6612
6613 lpfc_read_config(phba, pmb);
6614
6615 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6616 if (rc != MBX_SUCCESS) {
6617 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6618 "2012 Mailbox failed, mbxCmd x%x "
6619 "READ_CONFIG, mbxStatus x%x\n",
6620 bf_get(lpfc_mqe_command, &pmb->u.mqe),
6621 bf_get(lpfc_mqe_status, &pmb->u.mqe));
6622 rc = -EIO;
6623 } else {
6624 rd_config = &pmb->u.mqe.un.rd_config;
6625 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
6626 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
6627 phba->sli4_hba.lnk_info.lnk_tp =
6628 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
6629 phba->sli4_hba.lnk_info.lnk_no =
6630 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
6631 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6632 "3081 lnk_type:%d, lnk_numb:%d\n",
6633 phba->sli4_hba.lnk_info.lnk_tp,
6634 phba->sli4_hba.lnk_info.lnk_no);
6635 } else
6636 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6637 "3082 Mailbox (x%x) returned ldv:x0\n",
6638 bf_get(lpfc_mqe_command, &pmb->u.mqe));
6639 phba->sli4_hba.extents_in_use =
6640 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
6641 phba->sli4_hba.max_cfg_param.max_xri =
6642 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
6643 phba->sli4_hba.max_cfg_param.xri_base =
6644 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
6645 phba->sli4_hba.max_cfg_param.max_vpi =
6646 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
6647 phba->sli4_hba.max_cfg_param.vpi_base =
6648 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
6649 phba->sli4_hba.max_cfg_param.max_rpi =
6650 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
6651 phba->sli4_hba.max_cfg_param.rpi_base =
6652 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
6653 phba->sli4_hba.max_cfg_param.max_vfi =
6654 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
6655 phba->sli4_hba.max_cfg_param.vfi_base =
6656 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
6657 phba->sli4_hba.max_cfg_param.max_fcfi =
6658 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
6659 phba->sli4_hba.max_cfg_param.max_eq =
6660 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
6661 phba->sli4_hba.max_cfg_param.max_rq =
6662 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
6663 phba->sli4_hba.max_cfg_param.max_wq =
6664 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
6665 phba->sli4_hba.max_cfg_param.max_cq =
6666 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
6667 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
6668 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
6669 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
6670 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
6671 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
6672 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
6673 phba->max_vports = phba->max_vpi;
6674 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6675 "2003 cfg params Extents? %d "
%d " 6676 "XRI(B:%d M:%d), " 6677 "VPI(B:%d M:%d) " 6678 "VFI(B:%d M:%d) " 6679 "RPI(B:%d M:%d) " 6680 "FCFI(Count:%d)\n", 6681 phba->sli4_hba.extents_in_use, 6682 phba->sli4_hba.max_cfg_param.xri_base, 6683 phba->sli4_hba.max_cfg_param.max_xri, 6684 phba->sli4_hba.max_cfg_param.vpi_base, 6685 phba->sli4_hba.max_cfg_param.max_vpi, 6686 phba->sli4_hba.max_cfg_param.vfi_base, 6687 phba->sli4_hba.max_cfg_param.max_vfi, 6688 phba->sli4_hba.max_cfg_param.rpi_base, 6689 phba->sli4_hba.max_cfg_param.max_rpi, 6690 phba->sli4_hba.max_cfg_param.max_fcfi); 6691 } 6692 6693 if (rc) 6694 goto read_cfg_out; 6695 6696 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 6697 length = phba->sli4_hba.max_cfg_param.max_xri - 6698 lpfc_sli4_get_els_iocb_cnt(phba); 6699 if (phba->cfg_hba_queue_depth > length) { 6700 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6701 "3361 HBA queue depth changed from %d to %d\n", 6702 phba->cfg_hba_queue_depth, length); 6703 phba->cfg_hba_queue_depth = length; 6704 } 6705 6706 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 6707 LPFC_SLI_INTF_IF_TYPE_2) 6708 goto read_cfg_out; 6709 6710 /* get the pf# and vf# for SLI4 if_type 2 port */ 6711 length = (sizeof(struct lpfc_mbx_get_func_cfg) - 6712 sizeof(struct lpfc_sli4_cfg_mhdr)); 6713 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, 6714 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 6715 length, LPFC_SLI4_MBX_EMBED); 6716 6717 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 6718 shdr = (union lpfc_sli4_cfg_shdr *) 6719 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 6720 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6721 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6722 if (rc2 || shdr_status || shdr_add_status) { 6723 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6724 "3026 Mailbox failed , mbxCmd x%x " 6725 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 6726 bf_get(lpfc_mqe_command, &pmb->u.mqe), 6727 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 6728 goto read_cfg_out; 6729 } 6730 6731 /* search for fc_fcoe resrouce descriptor */ 6732 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 6733 desc_count = get_func_cfg->func_cfg.rsrc_desc_count; 6734 6735 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; 6736 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; 6737 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); 6738 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) 6739 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; 6740 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) 6741 goto read_cfg_out; 6742 6743 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 6744 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); 6745 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 6746 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { 6747 phba->sli4_hba.iov.pf_number = 6748 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 6749 phba->sli4_hba.iov.vf_number = 6750 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); 6751 break; 6752 } 6753 } 6754 6755 if (i < LPFC_RSRC_DESC_MAX_NUM) 6756 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6757 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 6758 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 6759 phba->sli4_hba.iov.vf_number); 6760 else 6761 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6762 "3028 GET_FUNCTION_CONFIG: failed to find " 6763 "Resrouce Descriptor:x%x\n", 6764 LPFC_RSRC_DESC_TYPE_FCFCOE); 6765 6766 read_cfg_out: 6767 mempool_free(pmb, phba->mbox_mem_pool); 6768 return rc; 6769 } 6770 6771 /** 6772 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 
6773 * @phba: pointer to lpfc hba data structure.
6774 *
6775 * This routine is invoked to set up the port-side endian order when
6776 * the port if_type is 0. This routine has no function for other
6777 * if_types.
6778 *
6779 * Return codes
6780 * 0 - successful
6781 * -ENOMEM - No available memory
6782 * -EIO - The mailbox failed to complete successfully.
6783 **/
6784 static int
6785 lpfc_setup_endian_order(struct lpfc_hba *phba)
6786 {
6787 LPFC_MBOXQ_t *mboxq;
6788 uint32_t if_type, rc = 0;
6789 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
6790 HOST_ENDIAN_HIGH_WORD1};
6791
6792 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
6793 switch (if_type) {
6794 case LPFC_SLI_INTF_IF_TYPE_0:
6795 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6796 GFP_KERNEL);
6797 if (!mboxq) {
6798 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6799 "0492 Unable to allocate memory for "
6800 "issuing SLI_CONFIG_SPECIAL mailbox "
6801 "command\n");
6802 return -ENOMEM;
6803 }
6804
6805 /*
6806 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
6807 * two words to contain special data values and no other data.
6808 */
6809 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
6810 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
6811 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6812 if (rc != MBX_SUCCESS) {
6813 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6814 "0493 SLI_CONFIG_SPECIAL mailbox "
6815 "failed with status x%x\n",
6816 rc);
6817 rc = -EIO;
6818 }
6819 mempool_free(mboxq, phba->mbox_mem_pool);
6820 break;
6821 case LPFC_SLI_INTF_IF_TYPE_2:
6822 case LPFC_SLI_INTF_IF_TYPE_1:
6823 default:
6824 break;
6825 }
6826 return rc;
6827 }
6828
6829 /**
6830 * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts
6831 * @phba: pointer to lpfc hba data structure.
6832 *
6833 * This routine is invoked to check the user-settable queue counts for EQs and
6834 * CQs. After this routine is called, the counts will be set to valid values
6835 * that adhere to the constraints of the system's interrupt vectors and the
6836 * port's queue resources.
6837 *
6838 * Return codes
6839 * 0 - successful
6840 * -ENOMEM - No available memory
6841 **/
6842 static int
6843 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6844 {
6845 int cfg_fcp_io_channel;
6846 uint32_t cpu;
6847 uint32_t i = 0;
6848 int fof_vectors = phba->cfg_fof ? 1 : 0;
6849
6850 /*
6851 * Sanity check for configured queue parameters against the run-time
6852 * device parameters
6853 */
6854
6855 /* Sanity check on HBA EQ parameters */
6856 cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
6857
6858 /* It doesn't make sense to have more io channels than online CPUs */
6859 for_each_present_cpu(cpu) {
6860 if (cpu_online(cpu))
6861 i++;
6862 }
6863 phba->sli4_hba.num_online_cpu = i;
6864 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
6865 phba->sli4_hba.curr_disp_cpu = 0;
6866
6867 if (i < cfg_fcp_io_channel) {
6868 lpfc_printf_log(phba,
6869 KERN_ERR, LOG_INIT,
6870 "3188 Reducing IO channels to match number of "
6871 "online CPUs: from %d to %d\n",
6872 cfg_fcp_io_channel, i);
6873 cfg_fcp_io_channel = i;
6874 }
6875
6876 if (cfg_fcp_io_channel + fof_vectors >
6877 phba->sli4_hba.max_cfg_param.max_eq) {
6878 if (phba->sli4_hba.max_cfg_param.max_eq <
6879 LPFC_FCP_IO_CHAN_MIN) {
6880 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6881 "2574 Not enough EQs (%d) from the "
6882 "pci function for supporting FCP "
6883 "EQs (%d)\n",
6884 phba->sli4_hba.max_cfg_param.max_eq,
6885 phba->cfg_fcp_io_channel);
6886 goto out_error;
6887 }
6888 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6889 "2575 Reducing IO channels to match number of "
6890 "available EQs: from %d to %d\n",
6891 cfg_fcp_io_channel,
6892 phba->sli4_hba.max_cfg_param.max_eq);
6893 cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq -
6894 fof_vectors;
6895 }
6896
6897 /* The actual number of FCP event queues adopted */
6898 phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
6899
6900 /* Get EQ depth from module parameter, fake the default for now */
6901 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
6902 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
6903
6904 /* Get CQ depth from module parameter, fake the default for now */
6905 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
6906 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
6907
6908 return 0;
6909 out_error:
6910 return -ENOMEM;
6911 }
6912
6913 /**
6914 * lpfc_sli4_queue_create - Create all the SLI4 queues
6915 * @phba: pointer to lpfc hba data structure.
6916 *
6917 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
6918 * operation. For each SLI4 queue type, the parameters such as queue entry
6919 * count (queue depth) shall be taken from the module parameter. For now,
6920 * we just use some constant number as place holder.
6921 *
6922 * Return codes
6923 * 0 - successful
6924 * -ENOMEM - No available memory
6925 * -EIO - The mailbox failed to complete successfully.
6926 **/
6927 int
6928 lpfc_sli4_queue_create(struct lpfc_hba *phba)
6929 {
6930 struct lpfc_queue *qdesc;
6931 int idx;
6932
6933 /*
6934 * Create HBA Record arrays.
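 * One EQ, one fast-path FCP CQ, and one FCP WQ are allocated per configured
 * io channel, plus the slow-path MBX/ELS queues and the header/data receive
 * queue pair.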
6935 */ 6936 if (!phba->cfg_fcp_io_channel) 6937 return -ERANGE; 6938 6939 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 6940 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 6941 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 6942 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 6943 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 6944 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 6945 6946 phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) * 6947 phba->cfg_fcp_io_channel), GFP_KERNEL); 6948 if (!phba->sli4_hba.hba_eq) { 6949 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6950 "2576 Failed allocate memory for " 6951 "fast-path EQ record array\n"); 6952 goto out_error; 6953 } 6954 6955 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * 6956 phba->cfg_fcp_io_channel), GFP_KERNEL); 6957 if (!phba->sli4_hba.fcp_cq) { 6958 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6959 "2577 Failed allocate memory for fast-path " 6960 "CQ record array\n"); 6961 goto out_error; 6962 } 6963 6964 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * 6965 phba->cfg_fcp_io_channel), GFP_KERNEL); 6966 if (!phba->sli4_hba.fcp_wq) { 6967 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6968 "2578 Failed allocate memory for fast-path " 6969 "WQ record array\n"); 6970 goto out_error; 6971 } 6972 6973 /* 6974 * Since the first EQ can have multiple CQs associated with it, 6975 * this array is used to quickly see if we have a FCP fast-path 6976 * CQ match. 6977 */ 6978 phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) * 6979 phba->cfg_fcp_io_channel), GFP_KERNEL); 6980 if (!phba->sli4_hba.fcp_cq_map) { 6981 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6982 "2545 Failed allocate memory for fast-path " 6983 "CQ map\n"); 6984 goto out_error; 6985 } 6986 6987 /* 6988 * Create HBA Event Queues (EQs). The cfg_fcp_io_channel specifies 6989 * how many EQs to create. 
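 * Each EQ allocated in the loop below is later armed by
 * lpfc_sli4_queue_setup(), where the cfg_fcp_imax interrupt ceiling is
 * divided evenly across the io channels.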
6990 */ 6991 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { 6992 6993 /* Create EQs */ 6994 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 6995 phba->sli4_hba.eq_ecount); 6996 if (!qdesc) { 6997 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6998 "0497 Failed allocate EQ (%d)\n", idx); 6999 goto out_error; 7000 } 7001 phba->sli4_hba.hba_eq[idx] = qdesc; 7002 7003 /* Create Fast Path FCP CQs */ 7004 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 7005 phba->sli4_hba.cq_ecount); 7006 if (!qdesc) { 7007 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7008 "0499 Failed allocate fast-path FCP " 7009 "CQ (%d)\n", idx); 7010 goto out_error; 7011 } 7012 phba->sli4_hba.fcp_cq[idx] = qdesc; 7013 7014 /* Create Fast Path FCP WQs */ 7015 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 7016 phba->sli4_hba.wq_ecount); 7017 if (!qdesc) { 7018 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7019 "0503 Failed allocate fast-path FCP " 7020 "WQ (%d)\n", idx); 7021 goto out_error; 7022 } 7023 phba->sli4_hba.fcp_wq[idx] = qdesc; 7024 } 7025 7026 7027 /* 7028 * Create Slow Path Completion Queues (CQs) 7029 */ 7030 7031 /* Create slow-path Mailbox Command Complete Queue */ 7032 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 7033 phba->sli4_hba.cq_ecount); 7034 if (!qdesc) { 7035 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7036 "0500 Failed allocate slow-path mailbox CQ\n"); 7037 goto out_error; 7038 } 7039 phba->sli4_hba.mbx_cq = qdesc; 7040 7041 /* Create slow-path ELS Complete Queue */ 7042 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 7043 phba->sli4_hba.cq_ecount); 7044 if (!qdesc) { 7045 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7046 "0501 Failed allocate slow-path ELS CQ\n"); 7047 goto out_error; 7048 } 7049 phba->sli4_hba.els_cq = qdesc; 7050 7051 7052 /* 7053 * Create Slow Path Work Queues (WQs) 7054 */ 7055 7056 /* Create Mailbox Command Queue */ 7057 7058 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, 7059 phba->sli4_hba.mq_ecount); 7060 if (!qdesc) { 7061 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7062 "0505 Failed allocate slow-path MQ\n"); 7063 goto out_error; 7064 } 7065 phba->sli4_hba.mbx_wq = qdesc; 7066 7067 /* 7068 * Create ELS Work Queues 7069 */ 7070 7071 /* Create slow-path ELS Work Queue */ 7072 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 7073 phba->sli4_hba.wq_ecount); 7074 if (!qdesc) { 7075 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7076 "0504 Failed allocate slow-path ELS WQ\n"); 7077 goto out_error; 7078 } 7079 phba->sli4_hba.els_wq = qdesc; 7080 7081 /* 7082 * Create Receive Queue (RQ) 7083 */ 7084 7085 /* Create Receive Queue for header */ 7086 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 7087 phba->sli4_hba.rq_ecount); 7088 if (!qdesc) { 7089 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7090 "0506 Failed allocate receive HRQ\n"); 7091 goto out_error; 7092 } 7093 phba->sli4_hba.hdr_rq = qdesc; 7094 7095 /* Create Receive Queue for data */ 7096 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 7097 phba->sli4_hba.rq_ecount); 7098 if (!qdesc) { 7099 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7100 "0507 Failed allocate receive DRQ\n"); 7101 goto out_error; 7102 } 7103 phba->sli4_hba.dat_rq = qdesc; 7104 7105 /* Create the Queues needed for Flash Optimized Fabric operations */ 7106 if (phba->cfg_fof) 7107 lpfc_fof_queue_create(phba); 7108 return 0; 7109 7110 out_error: 7111 lpfc_sli4_queue_destroy(phba); 7112 return -ENOMEM; 7113 } 7114 7115 /** 7116 * lpfc_sli4_queue_destroy - 
Destroy all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues used for FCoE HBA
 * operation. It frees the per-channel EQ/CQ/WQ descriptors and pointer
 * arrays, the slow-path queues, and the receive queues allocated by the
 * queue-create routine above.
 **/
void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
	int idx;

	if (phba->cfg_fof)
		lpfc_fof_queue_destroy(phba);

	if (phba->sli4_hba.hba_eq != NULL) {
		/* Release HBA event queue */
		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
			if (phba->sli4_hba.hba_eq[idx] != NULL) {
				lpfc_sli4_queue_free(
					phba->sli4_hba.hba_eq[idx]);
				phba->sli4_hba.hba_eq[idx] = NULL;
			}
		}
		kfree(phba->sli4_hba.hba_eq);
		phba->sli4_hba.hba_eq = NULL;
	}

	if (phba->sli4_hba.fcp_cq != NULL) {
		/* Release FCP completion queue */
		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
			if (phba->sli4_hba.fcp_cq[idx] != NULL) {
				lpfc_sli4_queue_free(
					phba->sli4_hba.fcp_cq[idx]);
				phba->sli4_hba.fcp_cq[idx] = NULL;
			}
		}
		kfree(phba->sli4_hba.fcp_cq);
		phba->sli4_hba.fcp_cq = NULL;
	}

	if (phba->sli4_hba.fcp_wq != NULL) {
		/* Release FCP work queue */
		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
			if (phba->sli4_hba.fcp_wq[idx] != NULL) {
				lpfc_sli4_queue_free(
					phba->sli4_hba.fcp_wq[idx]);
				phba->sli4_hba.fcp_wq[idx] = NULL;
			}
		}
		kfree(phba->sli4_hba.fcp_wq);
		phba->sli4_hba.fcp_wq = NULL;
	}

	/* Release FCP CQ mapping array */
	if (phba->sli4_hba.fcp_cq_map != NULL) {
		kfree(phba->sli4_hba.fcp_cq_map);
		phba->sli4_hba.fcp_cq_map = NULL;
	}

	/* Release mailbox command work queue */
	if (phba->sli4_hba.mbx_wq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
		phba->sli4_hba.mbx_wq = NULL;
	}

	/* Release ELS work queue */
	if (phba->sli4_hba.els_wq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
		phba->sli4_hba.els_wq = NULL;
	}

	/* Release unsolicited receive queue */
	if (phba->sli4_hba.hdr_rq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
		phba->sli4_hba.hdr_rq = NULL;
	}
	if (phba->sli4_hba.dat_rq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
		phba->sli4_hba.dat_rq = NULL;
	}

	/* Release ELS complete queue */
	if (phba->sli4_hba.els_cq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
		phba->sli4_hba.els_cq = NULL;
	}

	/* Release mailbox command complete queue */
	if (phba->sli4_hba.mbx_cq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
		phba->sli4_hba.mbx_cq = NULL;
	}

	return;
}

/**
 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
 * operation.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
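 *
 * Typical bring-up/teardown pairing (an illustrative sketch only; the
 * actual call sites live in the PCI probe/remove paths):
 *
 *	rc = lpfc_sli4_queue_create(phba);	(allocate queue memory)
 *	rc = lpfc_sli4_queue_setup(phba);	(create queues on the port)
 *	...
 *	lpfc_sli4_queue_unset(phba);		(destroy queues on the port)
 *	lpfc_sli4_queue_destroy(phba);		(free host queue memory)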
7228 **/ 7229 int 7230 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 7231 { 7232 struct lpfc_sli *psli = &phba->sli; 7233 struct lpfc_sli_ring *pring; 7234 int rc = -ENOMEM; 7235 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 7236 int fcp_cq_index = 0; 7237 uint32_t shdr_status, shdr_add_status; 7238 union lpfc_sli4_cfg_shdr *shdr; 7239 LPFC_MBOXQ_t *mboxq; 7240 uint32_t length; 7241 7242 /* Check for dual-ULP support */ 7243 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7244 if (!mboxq) { 7245 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7246 "3249 Unable to allocate memory for " 7247 "QUERY_FW_CFG mailbox command\n"); 7248 return -ENOMEM; 7249 } 7250 length = (sizeof(struct lpfc_mbx_query_fw_config) - 7251 sizeof(struct lpfc_sli4_cfg_mhdr)); 7252 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 7253 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 7254 length, LPFC_SLI4_MBX_EMBED); 7255 7256 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7257 7258 shdr = (union lpfc_sli4_cfg_shdr *) 7259 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 7260 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7261 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 7262 if (shdr_status || shdr_add_status || rc) { 7263 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7264 "3250 QUERY_FW_CFG mailbox failed with status " 7265 "x%x add_status x%x, mbx status x%x\n", 7266 shdr_status, shdr_add_status, rc); 7267 if (rc != MBX_TIMEOUT) 7268 mempool_free(mboxq, phba->mbox_mem_pool); 7269 rc = -ENXIO; 7270 goto out_error; 7271 } 7272 7273 phba->sli4_hba.fw_func_mode = 7274 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; 7275 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; 7276 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; 7277 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7278 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " 7279 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, 7280 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); 7281 7282 if (rc != MBX_TIMEOUT) 7283 mempool_free(mboxq, phba->mbox_mem_pool); 7284 7285 /* 7286 * Set up HBA Event Queues (EQs) 7287 */ 7288 7289 /* Set up HBA event queue */ 7290 if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) { 7291 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7292 "3147 Fast-path EQs not allocated\n"); 7293 rc = -ENOMEM; 7294 goto out_error; 7295 } 7296 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) { 7297 if (!phba->sli4_hba.hba_eq[fcp_eqidx]) { 7298 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7299 "0522 Fast-path EQ (%d) not " 7300 "allocated\n", fcp_eqidx); 7301 rc = -ENOMEM; 7302 goto out_destroy_hba_eq; 7303 } 7304 rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx], 7305 (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel)); 7306 if (rc) { 7307 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7308 "0523 Failed setup of fast-path EQ " 7309 "(%d), rc = 0x%x\n", fcp_eqidx, rc); 7310 goto out_destroy_hba_eq; 7311 } 7312 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7313 "2584 HBA EQ setup: " 7314 "queue[%d]-id=%d\n", fcp_eqidx, 7315 phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id); 7316 } 7317 7318 /* Set up fast-path FCP Response Complete Queue */ 7319 if (!phba->sli4_hba.fcp_cq) { 7320 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7321 "3148 Fast-path FCP CQ array not " 7322 "allocated\n"); 7323 rc = -ENOMEM; 7324 goto out_destroy_hba_eq; 7325 } 7326 7327 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) { 7328 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 7329 
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7330 "0526 Fast-path FCP CQ (%d) not " 7331 "allocated\n", fcp_cqidx); 7332 rc = -ENOMEM; 7333 goto out_destroy_fcp_cq; 7334 } 7335 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], 7336 phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP); 7337 if (rc) { 7338 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7339 "0527 Failed setup of fast-path FCP " 7340 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc); 7341 goto out_destroy_fcp_cq; 7342 } 7343 7344 /* Setup fcp_cq_map for fast lookup */ 7345 phba->sli4_hba.fcp_cq_map[fcp_cqidx] = 7346 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id; 7347 7348 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7349 "2588 FCP CQ setup: cq[%d]-id=%d, " 7350 "parent seq[%d]-id=%d\n", 7351 fcp_cqidx, 7352 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 7353 fcp_cqidx, 7354 phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id); 7355 } 7356 7357 /* Set up fast-path FCP Work Queue */ 7358 if (!phba->sli4_hba.fcp_wq) { 7359 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7360 "3149 Fast-path FCP WQ array not " 7361 "allocated\n"); 7362 rc = -ENOMEM; 7363 goto out_destroy_fcp_cq; 7364 } 7365 7366 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) { 7367 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { 7368 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7369 "0534 Fast-path FCP WQ (%d) not " 7370 "allocated\n", fcp_wqidx); 7371 rc = -ENOMEM; 7372 goto out_destroy_fcp_wq; 7373 } 7374 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], 7375 phba->sli4_hba.fcp_cq[fcp_wqidx], 7376 LPFC_FCP); 7377 if (rc) { 7378 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7379 "0535 Failed setup of fast-path FCP " 7380 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); 7381 goto out_destroy_fcp_wq; 7382 } 7383 7384 /* Bind this WQ to the next FCP ring */ 7385 pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx]; 7386 pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx]; 7387 phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring; 7388 7389 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7390 "2591 FCP WQ setup: wq[%d]-id=%d, " 7391 "parent cq[%d]-id=%d\n", 7392 fcp_wqidx, 7393 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, 7394 fcp_cq_index, 7395 phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id); 7396 } 7397 /* 7398 * Set up Complete Queues (CQs) 7399 */ 7400 7401 /* Set up slow-path MBOX Complete Queue as the first CQ */ 7402 if (!phba->sli4_hba.mbx_cq) { 7403 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7404 "0528 Mailbox CQ not allocated\n"); 7405 rc = -ENOMEM; 7406 goto out_destroy_fcp_wq; 7407 } 7408 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, 7409 phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX); 7410 if (rc) { 7411 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7412 "0529 Failed setup of slow-path mailbox CQ: " 7413 "rc = 0x%x\n", rc); 7414 goto out_destroy_fcp_wq; 7415 } 7416 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7417 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", 7418 phba->sli4_hba.mbx_cq->queue_id, 7419 phba->sli4_hba.hba_eq[0]->queue_id); 7420 7421 /* Set up slow-path ELS Complete Queue */ 7422 if (!phba->sli4_hba.els_cq) { 7423 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7424 "0530 ELS CQ not allocated\n"); 7425 rc = -ENOMEM; 7426 goto out_destroy_mbx_cq; 7427 } 7428 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, 7429 phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS); 7430 if (rc) { 7431 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7432 "0531 Failed setup of slow-path ELS CQ: " 7433 "rc = 0x%x\n", rc); 7434 goto out_destroy_mbx_cq; 7435 } 7436 lpfc_printf_log(phba, KERN_INFO, 
LOG_INIT, 7437 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", 7438 phba->sli4_hba.els_cq->queue_id, 7439 phba->sli4_hba.hba_eq[0]->queue_id); 7440 7441 /* 7442 * Set up all the Work Queues (WQs) 7443 */ 7444 7445 /* Set up Mailbox Command Queue */ 7446 if (!phba->sli4_hba.mbx_wq) { 7447 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7448 "0538 Slow-path MQ not allocated\n"); 7449 rc = -ENOMEM; 7450 goto out_destroy_els_cq; 7451 } 7452 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, 7453 phba->sli4_hba.mbx_cq, LPFC_MBOX); 7454 if (rc) { 7455 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7456 "0539 Failed setup of slow-path MQ: " 7457 "rc = 0x%x\n", rc); 7458 goto out_destroy_els_cq; 7459 } 7460 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7461 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 7462 phba->sli4_hba.mbx_wq->queue_id, 7463 phba->sli4_hba.mbx_cq->queue_id); 7464 7465 /* Set up slow-path ELS Work Queue */ 7466 if (!phba->sli4_hba.els_wq) { 7467 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7468 "0536 Slow-path ELS WQ not allocated\n"); 7469 rc = -ENOMEM; 7470 goto out_destroy_mbx_wq; 7471 } 7472 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, 7473 phba->sli4_hba.els_cq, LPFC_ELS); 7474 if (rc) { 7475 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7476 "0537 Failed setup of slow-path ELS WQ: " 7477 "rc = 0x%x\n", rc); 7478 goto out_destroy_mbx_wq; 7479 } 7480 7481 /* Bind this WQ to the ELS ring */ 7482 pring = &psli->ring[LPFC_ELS_RING]; 7483 pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq; 7484 phba->sli4_hba.els_cq->pring = pring; 7485 7486 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7487 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 7488 phba->sli4_hba.els_wq->queue_id, 7489 phba->sli4_hba.els_cq->queue_id); 7490 7491 /* 7492 * Create Receive Queue (RQ) 7493 */ 7494 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 7495 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7496 "0540 Receive Queue not allocated\n"); 7497 rc = -ENOMEM; 7498 goto out_destroy_els_wq; 7499 } 7500 7501 lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ); 7502 lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ); 7503 7504 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 7505 phba->sli4_hba.els_cq, LPFC_USOL); 7506 if (rc) { 7507 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7508 "0541 Failed setup of Receive Queue: " 7509 "rc = 0x%x\n", rc); 7510 goto out_destroy_fcp_wq; 7511 } 7512 7513 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7514 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 7515 "parent cq-id=%d\n", 7516 phba->sli4_hba.hdr_rq->queue_id, 7517 phba->sli4_hba.dat_rq->queue_id, 7518 phba->sli4_hba.els_cq->queue_id); 7519 7520 if (phba->cfg_fof) { 7521 rc = lpfc_fof_queue_setup(phba); 7522 if (rc) { 7523 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7524 "0549 Failed setup of FOF Queues: " 7525 "rc = 0x%x\n", rc); 7526 goto out_destroy_els_rq; 7527 } 7528 } 7529 return 0; 7530 7531 out_destroy_els_rq: 7532 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); 7533 out_destroy_els_wq: 7534 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 7535 out_destroy_mbx_wq: 7536 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 7537 out_destroy_els_cq: 7538 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 7539 out_destroy_mbx_cq: 7540 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 7541 out_destroy_fcp_wq: 7542 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) 7543 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); 7544 out_destroy_fcp_cq: 7545 for (--fcp_cqidx; fcp_cqidx >= 
		0; fcp_cqidx--)
		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
out_destroy_hba_eq:
	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
		lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]);
out_error:
	return rc;
}

/**
 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset (destroy on the port) all the SLI4
 * queues used for FCoE HBA operation.
 **/
void
lpfc_sli4_queue_unset(struct lpfc_hba *phba)
{
	int fcp_qidx;

	/* Unset the queues created for Flash Optimized Fabric operations */
	if (phba->cfg_fof)
		lpfc_fof_queue_destroy(phba);
	/* Unset mailbox command work queue */
	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
	/* Unset ELS work queue */
	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
	/* Unset unsolicited receive queue */
	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
	/* Unset FCP work queue */
	if (phba->sli4_hba.fcp_wq) {
		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
		     fcp_qidx++)
			lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
	}
	/* Unset mailbox command complete queue */
	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
	/* Unset ELS complete queue */
	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
	/* Unset FCP response complete queue */
	if (phba->sli4_hba.fcp_cq) {
		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
		     fcp_qidx++)
			lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
	}
	/* Unset fast-path event queue */
	if (phba->sli4_hba.hba_eq) {
		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
		     fcp_qidx++)
			lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]);
	}
}

/**
 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and set up a pool of completion queue
 * events. The body of the completion queue event is a completion queue entry
 * (CQE). For now, this pool is used for the interrupt service routine to
 * queue the following HBA completion queue events for the worker thread to
 * process:
 *   - Mailbox asynchronous events
 *   - Receive queue completion unsolicited events
 * Later, this can be used for all the slow-path events.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/
static int
lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	int i;

	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
		if (!cq_event)
			goto out_pool_create_fail;
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_cqe_event_pool);
	}
	return 0;

out_pool_create_fail:
	lpfc_sli4_cq_event_pool_destroy(phba);
	return -ENOMEM;
}

/**
 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the pool of completion queue events at
 * driver unload time.
 * Note that it is the responsibility of the driver
 * cleanup routine to free all the outstanding completion-queue events
 * allocated from this pool back into the pool before invoking this routine
 * to destroy the pool.
 **/
static void
lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event, *next_cq_event;

	list_for_each_entry_safe(cq_event, next_cq_event,
				 &phba->sli4_hba.sp_cqe_event_pool, list) {
		list_del(&cq_event->list);
		kfree(cq_event);
	}
}

/**
 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock free version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event = NULL;

	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
			 struct lpfc_cq_event, list);
	return cq_event;
}

/**
 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the locked version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	cq_event = __lpfc_sli4_cq_event_alloc(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return cq_event;
}

/**
 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock free version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			     struct lpfc_cq_event *cq_event)
{
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
}

/**
 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the locked version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			   struct lpfc_cq_event *cq_event)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli4_cq_event_release(phba, cq_event);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine frees all the pending completion-queue events back into the
 * free pool for device reset.
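 *
 * Each event drained here was obtained earlier from the pool API in
 * interrupt context, e.g. (illustrative alloc/release pairing):
 *
 *	cq_event = lpfc_sli4_cq_event_alloc(phba);
 *	...	(fill in and queue for the worker thread)
 *	lpfc_sli4_cq_event_release(phba, cq_event);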
7743 **/ 7744 static void 7745 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) 7746 { 7747 LIST_HEAD(cqelist); 7748 struct lpfc_cq_event *cqe; 7749 unsigned long iflags; 7750 7751 /* Retrieve all the pending WCQEs from pending WCQE lists */ 7752 spin_lock_irqsave(&phba->hbalock, iflags); 7753 /* Pending FCP XRI abort events */ 7754 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 7755 &cqelist); 7756 /* Pending ELS XRI abort events */ 7757 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 7758 &cqelist); 7759 /* Pending asynnc events */ 7760 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, 7761 &cqelist); 7762 spin_unlock_irqrestore(&phba->hbalock, iflags); 7763 7764 while (!list_empty(&cqelist)) { 7765 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list); 7766 lpfc_sli4_cq_event_release(phba, cqe); 7767 } 7768 } 7769 7770 /** 7771 * lpfc_pci_function_reset - Reset pci function. 7772 * @phba: pointer to lpfc hba data structure. 7773 * 7774 * This routine is invoked to request a PCI function reset. It will destroys 7775 * all resources assigned to the PCI function which originates this request. 7776 * 7777 * Return codes 7778 * 0 - successful 7779 * -ENOMEM - No available memory 7780 * -EIO - The mailbox failed to complete successfully. 7781 **/ 7782 int 7783 lpfc_pci_function_reset(struct lpfc_hba *phba) 7784 { 7785 LPFC_MBOXQ_t *mboxq; 7786 uint32_t rc = 0, if_type; 7787 uint32_t shdr_status, shdr_add_status; 7788 uint32_t rdy_chk, num_resets = 0, reset_again = 0; 7789 union lpfc_sli4_cfg_shdr *shdr; 7790 struct lpfc_register reg_data; 7791 uint16_t devid; 7792 7793 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 7794 switch (if_type) { 7795 case LPFC_SLI_INTF_IF_TYPE_0: 7796 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 7797 GFP_KERNEL); 7798 if (!mboxq) { 7799 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7800 "0494 Unable to allocate memory for " 7801 "issuing SLI_FUNCTION_RESET mailbox " 7802 "command\n"); 7803 return -ENOMEM; 7804 } 7805 7806 /* Setup PCI function reset mailbox-ioctl command */ 7807 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 7808 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 7809 LPFC_SLI4_MBX_EMBED); 7810 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7811 shdr = (union lpfc_sli4_cfg_shdr *) 7812 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 7813 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7814 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 7815 &shdr->response); 7816 if (rc != MBX_TIMEOUT) 7817 mempool_free(mboxq, phba->mbox_mem_pool); 7818 if (shdr_status || shdr_add_status || rc) { 7819 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7820 "0495 SLI_FUNCTION_RESET mailbox " 7821 "failed with status x%x add_status x%x," 7822 " mbx status x%x\n", 7823 shdr_status, shdr_add_status, rc); 7824 rc = -ENXIO; 7825 } 7826 break; 7827 case LPFC_SLI_INTF_IF_TYPE_2: 7828 for (num_resets = 0; 7829 num_resets < MAX_IF_TYPE_2_RESETS; 7830 num_resets++) { 7831 reg_data.word0 = 0; 7832 bf_set(lpfc_sliport_ctrl_end, ®_data, 7833 LPFC_SLIPORT_LITTLE_ENDIAN); 7834 bf_set(lpfc_sliport_ctrl_ip, ®_data, 7835 LPFC_SLIPORT_INIT_PORT); 7836 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 7837 CTRLregaddr); 7838 /* flush */ 7839 pci_read_config_word(phba->pcidev, 7840 PCI_DEVICE_ID, &devid); 7841 /* 7842 * Poll the Port Status Register and wait for RDY for 7843 * up to 10 seconds. If the port doesn't respond, treat 7844 * it as an error. 
			 * If the port responds with RN, start
			 * the loop again.
			 */
			for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
				msleep(10);
				if (lpfc_readl(phba->sli4_hba.u.if_type2.
					       STATUSregaddr,
					       &reg_data.word0)) {
					rc = -ENODEV;
					goto out;
				}
				if (bf_get(lpfc_sliport_status_rn, &reg_data))
					reset_again++;
				if (bf_get(lpfc_sliport_status_rdy, &reg_data))
					break;
			}

			/*
			 * If the port responds to the init request with
			 * reset needed, delay for a bit and restart the loop.
			 */
			if (reset_again && (rdy_chk < 1000)) {
				msleep(10);
				reset_again = 0;
				continue;
			}

			/* Detect any port errors. */
			if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
			    (rdy_chk >= 1000)) {
				phba->work_status[0] = readl(
					phba->sli4_hba.u.if_type2.ERR1regaddr);
				phba->work_status[1] = readl(
					phba->sli4_hba.u.if_type2.ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2890 Port error detected during port "
					"reset(%d): wait_tmo:%d ms, "
					"port status reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					num_resets, rdy_chk*10,
					reg_data.word0,
					phba->work_status[0],
					phba->work_status[1]);
				rc = -ENODEV;
			}

			/*
			 * Terminate the outer loop provided the Port indicated
			 * ready within 10 seconds.
			 */
			if (rdy_chk < 1000)
				break;
		}
		/* delay driver action following IF_TYPE_2 function reset */
		msleep(100);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}

out:
	/* Catch the not-ready port failure after a port reset. */
	if (num_resets >= MAX_IF_TYPE_2_RESETS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3317 HBA not functional: IP Reset Failed "
				"after (%d) retries, try: "
				"echo fw_reset > board_mode\n", num_resets);
		rc = -ENODEV;
	}

	return rc;
}

/**
 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-4 interface spec.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	unsigned long bar0map_len, bar1map_len, bar2map_len;
	int error = -ENODEV;
	uint32_t if_type;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return error;
	else
		pdev = phba->pcidev;

	/* Set the device DMA mask size */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			return error;
		}
	}

	/*
	 * The BARs and register set definitions and offset locations are
	 * dependent on the if_type.
	 */
	if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
				  &phba->sli4_hba.sli_intf.word0)) {
		return error;
	}

	/* There is no SLI3 failback for SLI4 devices.
*/ 7961 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 7962 LPFC_SLI_INTF_VALID) { 7963 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7964 "2894 SLI_INTF reg contents invalid " 7965 "sli_intf reg 0x%x\n", 7966 phba->sli4_hba.sli_intf.word0); 7967 return error; 7968 } 7969 7970 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 7971 /* 7972 * Get the bus address of SLI4 device Bar regions and the 7973 * number of bytes required by each mapping. The mapping of the 7974 * particular PCI BARs regions is dependent on the type of 7975 * SLI4 device. 7976 */ 7977 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { 7978 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 7979 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 7980 7981 /* 7982 * Map SLI4 PCI Config Space Register base to a kernel virtual 7983 * addr 7984 */ 7985 phba->sli4_hba.conf_regs_memmap_p = 7986 ioremap(phba->pci_bar0_map, bar0map_len); 7987 if (!phba->sli4_hba.conf_regs_memmap_p) { 7988 dev_printk(KERN_ERR, &pdev->dev, 7989 "ioremap failed for SLI4 PCI config " 7990 "registers.\n"); 7991 goto out; 7992 } 7993 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 7994 /* Set up BAR0 PCI config space register memory map */ 7995 lpfc_sli4_bar0_register_memmap(phba, if_type); 7996 } else { 7997 phba->pci_bar0_map = pci_resource_start(pdev, 1); 7998 bar0map_len = pci_resource_len(pdev, 1); 7999 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 8000 dev_printk(KERN_ERR, &pdev->dev, 8001 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 8002 goto out; 8003 } 8004 phba->sli4_hba.conf_regs_memmap_p = 8005 ioremap(phba->pci_bar0_map, bar0map_len); 8006 if (!phba->sli4_hba.conf_regs_memmap_p) { 8007 dev_printk(KERN_ERR, &pdev->dev, 8008 "ioremap failed for SLI4 PCI config " 8009 "registers.\n"); 8010 goto out; 8011 } 8012 lpfc_sli4_bar0_register_memmap(phba, if_type); 8013 } 8014 8015 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && 8016 (pci_resource_start(pdev, PCI_64BIT_BAR2))) { 8017 /* 8018 * Map SLI4 if type 0 HBA Control Register base to a kernel 8019 * virtual address and setup the registers. 8020 */ 8021 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 8022 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 8023 phba->sli4_hba.ctrl_regs_memmap_p = 8024 ioremap(phba->pci_bar1_map, bar1map_len); 8025 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 8026 dev_printk(KERN_ERR, &pdev->dev, 8027 "ioremap failed for SLI4 HBA control registers.\n"); 8028 goto out_iounmap_conf; 8029 } 8030 phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p; 8031 lpfc_sli4_bar1_register_memmap(phba); 8032 } 8033 8034 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && 8035 (pci_resource_start(pdev, PCI_64BIT_BAR4))) { 8036 /* 8037 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel 8038 * virtual address and setup the registers. 
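		 *
		 * Summary of the mappings this routine sets up (a reading aid
		 * only, no additional behavior):
		 *	BAR0 - SLI4 config registers (with a legacy fallback
		 *	       when BAR0 is not populated)
		 *	BAR2 - HBA control registers (if_type 0 only)
		 *	BAR4 - doorbell registers (if_type 0 only)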
		 */
		phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
		bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
		phba->sli4_hba.drbl_regs_memmap_p =
			ioremap(phba->pci_bar2_map, bar2map_len);
		if (!phba->sli4_hba.drbl_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 HBA doorbell registers.\n");
			goto out_iounmap_ctrl;
		}
		phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
		error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
		if (error)
			goto out_iounmap_all;
	}

	return 0;

out_iounmap_all:
	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
out_iounmap_ctrl:
	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
out_iounmap_conf:
	iounmap(phba->sli4_hba.conf_regs_memmap_p);
out:
	return error;
}

/**
 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-4 interface spec.
 **/
static void
lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
{
	uint32_t if_type;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);

	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}

/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
 * invoked, enables either all or nothing, depending on the current
 * availability of PCI vector resources. The device driver is responsible
 * for calling the individual request_irq() to register each MSI-X vector
 * with an interrupt handler, which is done in this function. Note that
 * later, when the device is unloading, the driver should always call
 * free_irq() on all MSI-X vectors it has done request_irq() on before
 * calling pci_disable_msix(). Failure to do so results in a BUG_ON() and
 * the device will be left with MSI-X enabled and leak its vectors.
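 *
 * Teardown pairing (an illustrative sketch; it mirrors what
 * lpfc_sli_disable_msix() below actually does):
 *
 *	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
 *		free_irq(phba->msix_entries[i].vector, phba);
 *	pci_disable_msix(phba->pcidev);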
8113 * 8114 * Return codes 8115 * 0 - successful 8116 * other values - error 8117 **/ 8118 static int 8119 lpfc_sli_enable_msix(struct lpfc_hba *phba) 8120 { 8121 int rc, i; 8122 LPFC_MBOXQ_t *pmb; 8123 8124 /* Set up MSI-X multi-message vectors */ 8125 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 8126 phba->msix_entries[i].entry = i; 8127 8128 /* Configure MSI-X capability structure */ 8129 rc = pci_enable_msix(phba->pcidev, phba->msix_entries, 8130 ARRAY_SIZE(phba->msix_entries)); 8131 if (rc) { 8132 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8133 "0420 PCI enable MSI-X failed (%d)\n", rc); 8134 goto msi_fail_out; 8135 } 8136 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 8137 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8138 "0477 MSI-X entry[%d]: vector=x%x " 8139 "message=%d\n", i, 8140 phba->msix_entries[i].vector, 8141 phba->msix_entries[i].entry); 8142 /* 8143 * Assign MSI-X vectors to interrupt handlers 8144 */ 8145 8146 /* vector-0 is associated to slow-path handler */ 8147 rc = request_irq(phba->msix_entries[0].vector, 8148 &lpfc_sli_sp_intr_handler, IRQF_SHARED, 8149 LPFC_SP_DRIVER_HANDLER_NAME, phba); 8150 if (rc) { 8151 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8152 "0421 MSI-X slow-path request_irq failed " 8153 "(%d)\n", rc); 8154 goto msi_fail_out; 8155 } 8156 8157 /* vector-1 is associated to fast-path handler */ 8158 rc = request_irq(phba->msix_entries[1].vector, 8159 &lpfc_sli_fp_intr_handler, IRQF_SHARED, 8160 LPFC_FP_DRIVER_HANDLER_NAME, phba); 8161 8162 if (rc) { 8163 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8164 "0429 MSI-X fast-path request_irq failed " 8165 "(%d)\n", rc); 8166 goto irq_fail_out; 8167 } 8168 8169 /* 8170 * Configure HBA MSI-X attention conditions to messages 8171 */ 8172 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8173 8174 if (!pmb) { 8175 rc = -ENOMEM; 8176 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8177 "0474 Unable to allocate memory for issuing " 8178 "MBOX_CONFIG_MSI command\n"); 8179 goto mem_fail_out; 8180 } 8181 rc = lpfc_config_msi(phba, pmb); 8182 if (rc) 8183 goto mbx_fail_out; 8184 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 8185 if (rc != MBX_SUCCESS) { 8186 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 8187 "0351 Config MSI mailbox command failed, " 8188 "mbxCmd x%x, mbxStatus x%x\n", 8189 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 8190 goto mbx_fail_out; 8191 } 8192 8193 /* Free memory allocated for mailbox command */ 8194 mempool_free(pmb, phba->mbox_mem_pool); 8195 return rc; 8196 8197 mbx_fail_out: 8198 /* Free memory allocated for mailbox command */ 8199 mempool_free(pmb, phba->mbox_mem_pool); 8200 8201 mem_fail_out: 8202 /* free the irq already requested */ 8203 free_irq(phba->msix_entries[1].vector, phba); 8204 8205 irq_fail_out: 8206 /* free the irq already requested */ 8207 free_irq(phba->msix_entries[0].vector, phba); 8208 8209 msi_fail_out: 8210 /* Unconfigure MSI-X capability structure */ 8211 pci_disable_msix(phba->pcidev); 8212 return rc; 8213 } 8214 8215 /** 8216 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device. 8217 * @phba: pointer to lpfc hba data structure. 8218 * 8219 * This routine is invoked to release the MSI-X vectors and then disable the 8220 * MSI-X interrupt mode to device with SLI-3 interface spec. 
 **/
static void
lpfc_sli_disable_msix(struct lpfc_hba *phba)
{
	int i;

	/* Free up MSI-X multi-message vectors */
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		free_irq(phba->msix_entries[i].vector, phba);
	/* Disable MSI-X */
	pci_disable_msix(phba->pcidev);

	return;
}

/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
 * enable the MSI vector. The device driver is responsible for calling the
 * request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 */
static int
lpfc_sli_enable_msi(struct lpfc_hba *phba)
{
	int rc;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0462 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0471 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0478 MSI request_irq failed (%d)\n", rc);
	}
	return rc;
}

/**
 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the MSI interrupt mode to device with
 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it
 * has done request_irq() on before calling pci_disable_msi(). Failure to
 * do so results in a BUG_ON() and the device will be left with MSI enabled
 * and leak its vector.
 */
static void
lpfc_sli_disable_msi(struct lpfc_hba *phba)
{
	free_irq(phba->pcidev->irq, phba);
	pci_disable_msi(phba->pcidev);
	return;
}

/**
 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: interrupt mode requested (2 = MSI-X, 1 = MSI, 0 = INTx).
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
 * spec. Depending on the interrupt mode configured to the driver, the driver
 * will try to fall back from the configured interrupt mode to an interrupt
 * mode which is supported by the platform, kernel, and device, in the order
 * of:
 *	MSI-X -> MSI -> IRQ.
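 *
 * Caller-side sketch (illustrative only; cfg_mode is normally derived
 * from the driver's use-MSI configuration parameter):
 *
 *	intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
 *	if (intr_mode == LPFC_INTR_ERROR)
 *		(fail the probe - no interrupt mode could be enabled)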
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static uint32_t
lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval;

	if (cfg_mode == 2) {
		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
		if (!retval) {
			/* Now, try to enable MSI-X interrupt mode */
			retval = lpfc_sli_enable_msix(phba);
			if (!retval) {
				/* Indicate initialization to MSI-X mode */
				phba->intr_type = MSIX;
				intr_mode = 2;
			}
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
		}
	}
	return intr_mode;
}

/**
 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate the
 * driver's interrupt handler(s) from interrupt vector(s) to device with
 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
 * release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX)
		lpfc_sli_disable_msix(phba);
	else if (phba->intr_type == MSI)
		lpfc_sli_disable_msi(phba);
	else if (phba->intr_type == INTx)
		free_irq(phba->pcidev->irq, phba);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;

	return;
}

/**
 * lpfc_find_next_cpu - Find next available CPU that matches the phys_id
 * @phba: pointer to lpfc hba data structure.
 * @phys_id: physical package id to match.
 *
 * Find the next available CPU to use for IRQ to CPU affinity.
 */
static int
lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id)
{
	struct lpfc_vector_map_info *cpup;
	int cpu;

	cpup = phba->sli4_hba.cpu_map;
	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
		/* CPU must be online */
		if (cpu_online(cpu)) {
			if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
			    (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) &&
			    (cpup->phys_id == phys_id)) {
				return cpu;
			}
		}
		cpup++;
	}

	/*
	 * If we get here, we have used ALL CPUs for the specific
	 * phys_id. Now we need to clear out lpfc_used_cpu and start
	 * reusing CPUs.
	 */

	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
		if (lpfc_used_cpu[cpu] == phys_id)
			lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
	}

	cpup = phba->sli4_hba.cpu_map;
	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
		/* CPU must be online */
		if (cpu_online(cpu)) {
			if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
			    (cpup->phys_id == phys_id)) {
				return cpu;
			}
		}
		cpup++;
	}
	return LPFC_VECTOR_MAP_EMPTY;
}

/**
 * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors
 * @phba: pointer to lpfc hba data structure.
 * @vectors: number of HBA vectors
 *
 * Affinitize MSI-X IRQ vectors to CPUs. Try to equally spread vector
 * affinity across multiple physical CPUs (NUMA nodes).
 * In addition, this routine will assign an IO channel for each CPU
 * to use when issuing I/Os.
 */
static int
lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
{
	int i, idx, saved_chann, used_chann, cpu, phys_id;
	int max_phys_id, min_phys_id;
	int num_io_channel, first_cpu, chan;
	struct lpfc_vector_map_info *cpup;
#ifdef CONFIG_X86
	struct cpuinfo_x86 *cpuinfo;
#endif
	struct cpumask *mask;
	uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1];

	/* If there is no mapping, just return */
	if (!phba->cfg_fcp_cpu_map)
		return 1;

	/* Init cpu_map array */
	memset(phba->sli4_hba.cpu_map, 0xff,
	       (sizeof(struct lpfc_vector_map_info) *
		phba->sli4_hba.num_present_cpu));

	max_phys_id = 0;
	min_phys_id = 0xff;
	phys_id = 0;
	num_io_channel = 0;
	first_cpu = LPFC_VECTOR_MAP_EMPTY;

	/* Update CPU map with physical id and core id of each CPU */
	cpup = phba->sli4_hba.cpu_map;
	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
#ifdef CONFIG_X86
		cpuinfo = &cpu_data(cpu);
		cpup->phys_id = cpuinfo->phys_proc_id;
		cpup->core_id = cpuinfo->cpu_core_id;
#else
		/* No distinction between CPUs for other platforms */
		cpup->phys_id = 0;
		cpup->core_id = 0;
#endif

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3328 CPU physid %d coreid %d\n",
				cpup->phys_id, cpup->core_id);

		if (cpup->phys_id > max_phys_id)
			max_phys_id = cpup->phys_id;
		if (cpup->phys_id < min_phys_id)
			min_phys_id = cpup->phys_id;
		cpup++;
	}

	phys_id = min_phys_id;
	/* Now associate the HBA vectors with specific CPUs */
	for (idx = 0; idx < vectors; idx++) {
		cpup = phba->sli4_hba.cpu_map;
		cpu = lpfc_find_next_cpu(phba, phys_id);
		if (cpu == LPFC_VECTOR_MAP_EMPTY) {

			/* Try for all phys_id's */
			for (i = 1; i < max_phys_id; i++) {
				phys_id++;
				if (phys_id > max_phys_id)
					phys_id = min_phys_id;
				cpu = lpfc_find_next_cpu(phba, phys_id);
				if (cpu == LPFC_VECTOR_MAP_EMPTY)
					continue;
				goto found;
			}

			/* Use round robin for scheduling */
			phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN;
			chan = 0;
			cpup = phba->sli4_hba.cpu_map;
			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
				cpup->channel_id = chan;
				cpup++;
				chan++;
				if (chan >= phba->cfg_fcp_io_channel)
					chan = 0;
			}

			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3329 Cannot set affinity: "
					"Error mapping vector %d (%d)\n",
					idx, vectors);
			return 0;
		}
found:
		cpup += cpu;
		if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP)
			lpfc_used_cpu[cpu] = phys_id;

		/* Associate vector with selected CPU */
		cpup->irq = phba->sli4_hba.msix_entries[idx].vector;

		/* Associate IO channel with selected CPU */
		cpup->channel_id = idx;
		num_io_channel++;

		if (first_cpu == LPFC_VECTOR_MAP_EMPTY)
			first_cpu = cpu;

		/* Now affinitize to the selected CPU */
		mask = &cpup->maskbits;
		cpumask_clear(mask);
		cpumask_set_cpu(cpu, mask);
		i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx].
					  vector, mask);

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3330 Set Affinity: CPU %d channel %d "
				"irq %d (%x)\n",
				cpu, cpup->channel_id,
				phba->sli4_hba.msix_entries[idx].vector, i);

		/* Spread vector mapping across multiple physical CPU nodes */
		phys_id++;
		if (phys_id > max_phys_id)
			phys_id = min_phys_id;
	}

	/*
	 * Finally fill in the IO channel for any remaining CPUs.
	 * At this point, all IO channels have been assigned to a specific
	 * MSI-X vector, mapped to a specific CPU.
	 * Base the remaining IO channel assignments on the IO channels
	 * already assigned to other CPUs on the same phys_id.
	 */
	for (i = min_phys_id; i <= max_phys_id; i++) {
		/*
		 * If there are no IO channels already mapped to
		 * this phys_id, just round robin thru the io_channels.
		 * Setup chann[] for round robin.
		 */
		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
			chann[idx] = idx;

		saved_chann = 0;
		used_chann = 0;

		/*
		 * First build a list of IO channels already assigned
		 * to this phys_id before reassigning the same IO
		 * channels to the remaining CPUs.
		 */
		cpup = phba->sli4_hba.cpu_map;
		cpu = first_cpu;
		cpup += cpu;
		for (idx = 0; idx < phba->sli4_hba.num_present_cpu;
		     idx++) {
			if (cpup->phys_id == i) {
				/*
				 * Save any IO channels that are
				 * already mapped to this phys_id.
				 */
				if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
					chann[saved_chann] =
						cpup->channel_id;
					saved_chann++;
					goto out;
				}

				/* See if we are using round-robin */
				if (saved_chann == 0)
					saved_chann =
						phba->cfg_fcp_io_channel;

				/* Associate next IO channel with CPU */
				cpup->channel_id = chann[used_chann];
				num_io_channel++;
				used_chann++;
				if (used_chann == saved_chann)
					used_chann = 0;

				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
						"3331 Set IO_CHANN "
						"CPU %d channel %d\n",
						idx, cpup->channel_id);
			}
out:
			cpu++;
			if (cpu >= phba->sli4_hba.num_present_cpu) {
				cpup = phba->sli4_hba.cpu_map;
				cpu = 0;
			} else {
				cpup++;
			}
		}
	}

	if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) {
		cpup = phba->sli4_hba.cpu_map;
		for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
			if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) {
				cpup->channel_id = 0;
				num_io_channel++;

				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
						"3332 Assign IO_CHANN "
						"CPU %d channel %d\n",
						idx, cpup->channel_id);
			}
			cpup++;
		}
	}

	/* Sanity check */
	if (num_io_channel != phba->sli4_hba.num_present_cpu)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3333 Set affinity mismatch: "
				"%d chann != %d cpus: %d vectors\n",
				num_io_channel, phba->sli4_hba.num_present_cpu,
				vectors);

	/* Enable using cpu affinity for scheduling */
	phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
	return 1;
}


/**
 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
 * enables either all or nothing, depending on the current availability of
 * PCI vector resources. The device driver is responsible for calling the
 * individual request_irq() to register each MSI-X vector with an interrupt
 * handler, which is done in this function. Note that later, when the device
 * is unloading, the driver should always call free_irq() on all MSI-X
 * vectors it has done request_irq() on before calling pci_disable_msix().
 * Failure to do so results in a BUG_ON() and the device will be left with
 * MSI-X enabled and leak its vectors.
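 *
 * Teardown pairing (an illustrative sketch; it mirrors what
 * lpfc_sli4_disable_msix() below actually does):
 *
 *	for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
 *		irq_set_affinity_hint(
 *			phba->sli4_hba.msix_entries[index].vector, NULL);
 *		free_irq(phba->sli4_hba.msix_entries[index].vector,
 *			 &phba->sli4_hba.fcp_eq_hdl[index]);
 *	}
 *	pci_disable_msix(phba->pcidev);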
8675 * 8676 * Return codes 8677 * 0 - successful 8678 * other values - error 8679 **/ 8680 static int 8681 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 8682 { 8683 int vectors, rc, index; 8684 8685 /* Set up MSI-X multi-message vectors */ 8686 for (index = 0; index < phba->cfg_fcp_io_channel; index++) 8687 phba->sli4_hba.msix_entries[index].entry = index; 8688 8689 /* Configure MSI-X capability structure */ 8690 vectors = phba->cfg_fcp_io_channel; 8691 if (phba->cfg_fof) { 8692 phba->sli4_hba.msix_entries[index].entry = index; 8693 vectors++; 8694 } 8695 enable_msix_vectors: 8696 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, 8697 vectors); 8698 if (rc > 1) { 8699 vectors = rc; 8700 goto enable_msix_vectors; 8701 } else if (rc) { 8702 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8703 "0484 PCI enable MSI-X failed (%d)\n", rc); 8704 goto msi_fail_out; 8705 } 8706 8707 /* Log MSI-X vector assignment */ 8708 for (index = 0; index < vectors; index++) 8709 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8710 "0489 MSI-X entry[%d]: vector=x%x " 8711 "message=%d\n", index, 8712 phba->sli4_hba.msix_entries[index].vector, 8713 phba->sli4_hba.msix_entries[index].entry); 8714 8715 /* Assign MSI-X vectors to interrupt handlers */ 8716 for (index = 0; index < vectors; index++) { 8717 memset(&phba->sli4_hba.handler_name[index], 0, 16); 8718 sprintf((char *)&phba->sli4_hba.handler_name[index], 8719 LPFC_DRIVER_HANDLER_NAME"%d", index); 8720 8721 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 8722 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 8723 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1); 8724 if (phba->cfg_fof && (index == (vectors - 1))) 8725 rc = request_irq( 8726 phba->sli4_hba.msix_entries[index].vector, 8727 &lpfc_sli4_fof_intr_handler, IRQF_SHARED, 8728 (char *)&phba->sli4_hba.handler_name[index], 8729 &phba->sli4_hba.fcp_eq_hdl[index]); 8730 else 8731 rc = request_irq( 8732 phba->sli4_hba.msix_entries[index].vector, 8733 &lpfc_sli4_hba_intr_handler, IRQF_SHARED, 8734 (char *)&phba->sli4_hba.handler_name[index], 8735 &phba->sli4_hba.fcp_eq_hdl[index]); 8736 if (rc) { 8737 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8738 "0486 MSI-X fast-path (%d) " 8739 "request_irq failed (%d)\n", index, rc); 8740 goto cfg_fail_out; 8741 } 8742 } 8743 8744 if (phba->cfg_fof) 8745 vectors--; 8746 8747 if (vectors != phba->cfg_fcp_io_channel) { 8748 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8749 "3238 Reducing IO channels to match number of " 8750 "MSI-X vectors, requested %d got %d\n", 8751 phba->cfg_fcp_io_channel, vectors); 8752 phba->cfg_fcp_io_channel = vectors; 8753 } 8754 8755 lpfc_sli4_set_affinity(phba, vectors); 8756 return rc; 8757 8758 cfg_fail_out: 8759 /* free the irq already requested */ 8760 for (--index; index >= 0; index--) { 8761 irq_set_affinity_hint(phba->sli4_hba.msix_entries[index]. 8762 vector, NULL); 8763 free_irq(phba->sli4_hba.msix_entries[index].vector, 8764 &phba->sli4_hba.fcp_eq_hdl[index]); 8765 } 8766 8767 msi_fail_out: 8768 /* Unconfigure MSI-X capability structure */ 8769 pci_disable_msix(phba->pcidev); 8770 return rc; 8771 } 8772 8773 /** 8774 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device 8775 * @phba: pointer to lpfc hba data structure. 8776 * 8777 * This routine is invoked to release the MSI-X vectors and then disable the 8778 * MSI-X interrupt mode to device with SLI-4 interface spec. 
 **/
static void
lpfc_sli4_disable_msix(struct lpfc_hba *phba)
{
	int index;

	/* Free up MSI-X multi-message vectors */
	for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
		irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
				      vector, NULL);
		free_irq(phba->sli4_hba.msix_entries[index].vector,
			 &phba->sli4_hba.fcp_eq_hdl[index]);
	}
	if (phba->cfg_fof) {
		free_irq(phba->sli4_hba.msix_entries[index].vector,
			 &phba->sli4_hba.fcp_eq_hdl[index]);
	}
	/* Disable MSI-X */
	pci_disable_msix(phba->pcidev);

	return;
}

/**
 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
 * to enable the MSI vector. The device driver is responsible for calling
 * the request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli4_enable_msi(struct lpfc_hba *phba)
{
	int rc, index;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0487 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0488 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0490 MSI request_irq failed (%d)\n", rc);
		return rc;
	}

	for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
	}

	if (phba->cfg_fof) {
		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
	}
	return 0;
}

/**
 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the MSI interrupt mode to device with
 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it
 * has done request_irq() on before calling pci_disable_msi(). Failure to
 * do so results in a BUG_ON() and the device will be left with MSI enabled
 * and leak its vector.
 **/
static void
lpfc_sli4_disable_msi(struct lpfc_hba *phba)
{
	free_irq(phba->pcidev->irq, phba);
	pci_disable_msi(phba->pcidev);
	return;
}

/**
 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: interrupt mode requested (2 = MSI-X, 1 = MSI, 0 = INTx).
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
 * interface spec. Depending on the interrupt mode configured to the driver,
 * the driver will try to fall back from the configured interrupt mode to an
 * interrupt mode which is supported by the platform, kernel, and device, in
 * the order of:
 *	MSI-X -> MSI -> IRQ.
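 *
 * The return value is the intr_mode that was actually enabled (2 = MSI-X,
 * 1 = MSI, 0 = INTx, LPFC_INTR_ERROR on total failure), e.g. (sketch):
 *
 *	intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
 *	if (intr_mode == LPFC_INTR_ERROR)
 *		(no interrupt mode could be configured)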
8881  *
8882  * Return codes
8883  *     0 - successful
8884  *     other values - error
8885  **/
8886 static uint32_t
8887 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
8888 {
8889     uint32_t intr_mode = LPFC_INTR_ERROR;
8890     int retval, index;
8891
8892     if (cfg_mode == 2) {
8893         /* Preparation before conf_msi mbox cmd */
8894         retval = 0;
8895         if (!retval) {
8896             /* Now, try to enable MSI-X interrupt mode */
8897             retval = lpfc_sli4_enable_msix(phba);
8898             if (!retval) {
8899                 /* Indicate initialization to MSI-X mode */
8900                 phba->intr_type = MSIX;
8901                 intr_mode = 2;
8902             }
8903         }
8904     }
8905
8906     /* Fallback to MSI if MSI-X initialization failed */
8907     if (cfg_mode >= 1 && phba->intr_type == NONE) {
8908         retval = lpfc_sli4_enable_msi(phba);
8909         if (!retval) {
8910             /* Indicate initialization to MSI mode */
8911             phba->intr_type = MSI;
8912             intr_mode = 1;
8913         }
8914     }
8915
8916     /* Fallback to INTx if both MSI-X/MSI initialization failed */
8917     if (phba->intr_type == NONE) {
8918         retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
8919                      IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8920         if (!retval) {
8921             /* Indicate initialization to INTx mode */
8922             phba->intr_type = INTx;
8923             intr_mode = 0;
8924             for (index = 0; index < phba->cfg_fcp_io_channel;
8925                  index++) {
8926                 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8927                 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8928                 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
8929                     fcp_eq_in_use, 1);
8930             }
8931             if (phba->cfg_fof) {
8932                 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8933                 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8934                 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
8935                     fcp_eq_in_use, 1);
8936             }
8937         }
8938     }
8939     return intr_mode;
8940 }
8941
8942 /**
8943  * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
8944  * @phba: pointer to lpfc hba data structure.
8945  *
8946  * This routine is invoked to disable the device interrupt and disassociate
8947  * the driver's interrupt handler(s) from the interrupt vector(s) of a device
8948  * with SLI-4 interface spec. Depending on the interrupt mode, the driver
8949  * will release the interrupt vector(s) for the message signaled interrupt.
8950  **/
8951 static void
8952 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
8953 {
8954     /* Disable the currently initialized interrupt mode */
8955     if (phba->intr_type == MSIX)
8956         lpfc_sli4_disable_msix(phba);
8957     else if (phba->intr_type == MSI)
8958         lpfc_sli4_disable_msi(phba);
8959     else if (phba->intr_type == INTx)
8960         free_irq(phba->pcidev->irq, phba);
8961
8962     /* Reset interrupt management states */
8963     phba->intr_type = NONE;
8964     phba->sli.slistat.sli_intr = 0;
8965
8966     return;
8967 }
8968
8969 /**
8970  * lpfc_unset_hba - Unset SLI3 hba device initialization
8971  * @phba: pointer to lpfc hba data structure.
8972  *
8973  * This routine is invoked to unset the HBA device initialization steps to
8974  * a device with SLI-3 interface spec.
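 * It is the teardown counterpart used by the out_remove_device error path
 * of lpfc_pci_probe_one_s3() below, once interrupts have been enabled but
 * SLI-3 HBA setup has failed.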
8975 **/
8976 static void
8977 lpfc_unset_hba(struct lpfc_hba *phba)
8978 {
8979     struct lpfc_vport *vport = phba->pport;
8980     struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8981
8982     spin_lock_irq(shost->host_lock);
8983     vport->load_flag |= FC_UNLOADING;
8984     spin_unlock_irq(shost->host_lock);
8985
8986     kfree(phba->vpi_bmask);
8987     kfree(phba->vpi_ids);
8988
8989     lpfc_stop_hba_timers(phba);
8990
8991     phba->pport->work_port_events = 0;
8992
8993     lpfc_sli_hba_down(phba);
8994
8995     lpfc_sli_brdrestart(phba);
8996
8997     lpfc_sli_disable_intr(phba);
8998
8999     return;
9000 }
9001
9002 /**
9003  * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
9004  * @phba: Pointer to HBA context object.
9005  *
9006  * This function is called in the SLI4 code path to wait for completion
9007  * of the device's XRI exchange busy condition. It checks the XRI exchange
9008  * busy status on outstanding FCP and ELS I/Os every 10ms for up to 10
9009  * seconds; after that, it checks the XRI exchange busy status on FCP and
9010  * ELS I/Os every 30 seconds, logs an error message, and waits forever.
9011  * Only when all busy XRI exchanges have completed shall the driver unload
9012  * proceed with invoking the function reset ioctl mailbox command to the
9013  * CNA and the rest of the driver unload resource release.
9014  **/
9015 static void
9016 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
9017 {
9018     int wait_time = 0;
9019     int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
9020     int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
9021
9022     while (!fcp_xri_cmpl || !els_xri_cmpl) {
9023         if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
9024             if (!fcp_xri_cmpl)
9025                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9026                         "2877 FCP XRI exchange busy "
9027                         "wait time: %d seconds.\n",
9028                         wait_time/1000);
9029             if (!els_xri_cmpl)
9030                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9031                         "2878 ELS XRI exchange busy "
9032                         "wait time: %d seconds.\n",
9033                         wait_time/1000);
9034             msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
9035             wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
9036         } else {
9037             msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
9038             wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
9039         }
9040         fcp_xri_cmpl =
9041             list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
9042         els_xri_cmpl =
9043             list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
9044     }
9045 }
9046
9047 /**
9048  * lpfc_sli4_hba_unset - Unset the fcoe hba
9049  * @phba: Pointer to HBA context object.
9050  *
9051  * This function is called in the SLI4 code path to reset the HBA's FCoE
9052  * function. The caller is not required to hold any lock. This routine
9053  * issues the PCI function reset mailbox command to reset the FCoE function.
9054  * At the end of the function, it calls the lpfc_hba_down_post function to
9055  * free any pending commands.
9056  **/
9057 static void
9058 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
9059 {
9060     int wait_cnt = 0;
9061     LPFC_MBOXQ_t *mboxq;
9062     struct pci_dev *pdev = phba->pcidev;
9063
9064     lpfc_stop_hba_timers(phba);
9065     phba->sli4_hba.intr_enable = 0;
9066
9067     /*
9068      * Gracefully wait out the potential current outstanding asynchronous
9069      * mailbox command.
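     * The sequence below is: block any new asynchronous mailbox posting
     * (LPFC_SLI_ASYNC_MBX_BLK), poll LPFC_SLI_MBOX_ACTIVE in 10ms steps for
     * up to LPFC_ACTIVE_MBOX_WAIT_CNT iterations, and only then forcefully
     * complete the command with MBX_NOT_FINISHED.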
9070      */
9071
9072     /* First, block any pending async mailbox command from being posted */
9073     spin_lock_irq(&phba->hbalock);
9074     phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
9075     spin_unlock_irq(&phba->hbalock);
9076     /* Now, try to wait it out if we can */
9077     while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9078         msleep(10);
9079         if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
9080             break;
9081     }
9082     /* Forcefully release the outstanding mailbox command if timed out */
9083     if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9084         spin_lock_irq(&phba->hbalock);
9085         mboxq = phba->sli.mbox_active;
9086         mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
9087         __lpfc_mbox_cmpl_put(phba, mboxq);
9088         phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9089         phba->sli.mbox_active = NULL;
9090         spin_unlock_irq(&phba->hbalock);
9091     }
9092
9093     /* Abort all iocbs associated with the hba */
9094     lpfc_sli_hba_iocb_abort(phba);
9095
9096     /* Wait for completion of device XRI exchange busy */
9097     lpfc_sli4_xri_exchange_busy_wait(phba);
9098
9099     /* Disable PCI subsystem interrupt */
9100     lpfc_sli4_disable_intr(phba);
9101
9102     /* Disable SR-IOV if enabled */
9103     if (phba->cfg_sriov_nr_virtfn)
9104         pci_disable_sriov(pdev);
9105
9106     /* Stop kthread signal shall trigger work_done one more time */
9107     kthread_stop(phba->worker_thread);
9108
9109     /* Reset SLI4 HBA FCoE function */
9110     lpfc_pci_function_reset(phba);
9111     lpfc_sli4_queue_destroy(phba);
9112
9113     /* Stop the SLI4 device port */
9114     phba->pport->work_port_events = 0;
9115 }
9116
9117 /**
9118  * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
9119  * @phba: Pointer to HBA context object.
9120  * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
9121  *
9122  * This function is called in the SLI4 code path to read the port's
9123  * sli4 capabilities.
9124  *
9125  * This function may be called from any context that can block-wait
9126  * for the completion. The expectation is that this routine is called
9127  * typically from probe_one or from the online routine.
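 *
 * A minimal call sketch (illustrative only; the mailbox must come from,
 * and be returned to, phba->mbox_mem_pool by the caller):
 *
 *   mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *   if (mboxq) {
 *           if (!lpfc_pc_sli4_params_get(phba, mboxq))
 *                   (consult phba->sli4_hba.pc_sli4_params)
 *           mempool_free(mboxq, phba->mbox_mem_pool);
 *   }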
9128 **/
9129 int
9130 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9131 {
9132     int rc;
9133     struct lpfc_mqe *mqe;
9134     struct lpfc_pc_sli4_params *sli4_params;
9135     uint32_t mbox_tmo;
9136
9137     rc = 0;
9138     mqe = &mboxq->u.mqe;
9139
9140     /* Read the port's SLI4 Parameters port capabilities */
9141     lpfc_pc_sli4_params(mboxq);
9142     if (!phba->sli4_hba.intr_enable)
9143         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9144     else {
9145         mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
9146         rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
9147     }
9148
9149     if (unlikely(rc))
9150         return 1;
9151
9152     sli4_params = &phba->sli4_hba.pc_sli4_params;
9153     sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
9154     sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
9155     sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
9156     sli4_params->featurelevel_1 = bf_get(featurelevel_1,
9157                          &mqe->un.sli4_params);
9158     sli4_params->featurelevel_2 = bf_get(featurelevel_2,
9159                          &mqe->un.sli4_params);
9160     sli4_params->proto_types = mqe->un.sli4_params.word3;
9161     sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
9162     sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
9163     sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
9164     sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
9165     sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
9166     sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
9167     sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
9168     sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
9169     sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
9170     sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
9171     sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
9172     sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
9173     sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
9174     sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
9175     sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
9176     sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
9177     sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
9178     sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
9179     sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
9180     sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
9181
9182     /* Make sure that sge_supp_len can be handled by the driver */
9183     if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
9184         sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
9185
9186     return rc;
9187 }
9188
9189 /**
9190  * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
9191  * @phba: Pointer to HBA context object.
9192  * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
9193  *
9194  * This function is called in the SLI4 code path to read the port's
9195  * sli4 capabilities.
9196  *
9197  * This function may be called from any context that can block-wait
9198  * for the completion. The expectation is that this routine is called
9199  * typically from probe_one or from the online routine.
9200 **/
9201 int
9202 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9203 {
9204     int rc;
9205     struct lpfc_mqe *mqe = &mboxq->u.mqe;
9206     struct lpfc_pc_sli4_params *sli4_params;
9207     uint32_t mbox_tmo;
9208     int length;
9209     struct lpfc_sli4_parameters *mbx_sli4_parameters;
9210
9211     /*
9212      * By default, the driver assumes the SLI4 port requires RPI
9213      * header postings. The SLI4_PARAM response will correct this
9214      * assumption.
9215      */
9216     phba->sli4_hba.rpi_hdrs_in_use = 1;
9217
9218     /* Read the port's SLI4 Config Parameters */
9219     length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
9220           sizeof(struct lpfc_sli4_cfg_mhdr));
9221     lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
9222              LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
9223              length, LPFC_SLI4_MBX_EMBED);
9224     if (!phba->sli4_hba.intr_enable)
9225         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9226     else {
9227         mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
9228         rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
9229     }
9230     if (unlikely(rc))
9231         return rc;
9232     sli4_params = &phba->sli4_hba.pc_sli4_params;
9233     mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
9234     sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
9235     sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
9236     sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
9237     sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
9238                          mbx_sli4_parameters);
9239     sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
9240                          mbx_sli4_parameters);
9241     if (bf_get(cfg_phwq, mbx_sli4_parameters))
9242         phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
9243     else
9244         phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
9245     sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
9246     sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
9247     sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
9248     sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
9249     sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
9250     sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
9251     sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
9252     sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
9253     sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
9254                         mbx_sli4_parameters);
9255     sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
9256                        mbx_sli4_parameters);
9257     phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
9258     phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
9259
9260     /* Make sure that sge_supp_len can be handled by the driver */
9261     if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
9262         sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
9263
9264     return 0;
9265 }
9266
9267 /**
9268  * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
9269  * @pdev: pointer to PCI device
9270  * @pid: pointer to PCI device identifier
9271  *
9272  * This routine is to be called to attach a device with SLI-3 interface spec
9273  * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
9274  * presented on the PCI bus, the kernel PCI subsystem looks at the PCI
9275  * device-specific information of the device and driver to see whether the
9276  * driver can support this kind of device. If the match is successful, the
9277  * driver core invokes this routine. If this routine determines it can claim
9278  * the HBA, it does all the initialization needed to handle the HBA properly.
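 *
 * The setup performed below is strictly ordered, and each step has a
 * matching teardown label in the error path at the bottom of the function:
 * enable PCI device -> map PCI memory -> phase-1/phase-2 driver resources
 * -> iocb list -> SCSI host -> sysfs attributes -> interrupts -> SLI-3
 * HBA setup.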
9279 * 9280 * Return code 9281 * 0 - driver can claim the device 9282 * negative value - driver can not claim the device 9283 **/ 9284 static int 9285 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 9286 { 9287 struct lpfc_hba *phba; 9288 struct lpfc_vport *vport = NULL; 9289 struct Scsi_Host *shost = NULL; 9290 int error; 9291 uint32_t cfg_mode, intr_mode; 9292 9293 /* Allocate memory for HBA structure */ 9294 phba = lpfc_hba_alloc(pdev); 9295 if (!phba) 9296 return -ENOMEM; 9297 9298 /* Perform generic PCI device enabling operation */ 9299 error = lpfc_enable_pci_dev(phba); 9300 if (error) 9301 goto out_free_phba; 9302 9303 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 9304 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 9305 if (error) 9306 goto out_disable_pci_dev; 9307 9308 /* Set up SLI-3 specific device PCI memory space */ 9309 error = lpfc_sli_pci_mem_setup(phba); 9310 if (error) { 9311 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9312 "1402 Failed to set up pci memory space.\n"); 9313 goto out_disable_pci_dev; 9314 } 9315 9316 /* Set up phase-1 common device driver resources */ 9317 error = lpfc_setup_driver_resource_phase1(phba); 9318 if (error) { 9319 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9320 "1403 Failed to set up driver resource.\n"); 9321 goto out_unset_pci_mem_s3; 9322 } 9323 9324 /* Set up SLI-3 specific device driver resources */ 9325 error = lpfc_sli_driver_resource_setup(phba); 9326 if (error) { 9327 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9328 "1404 Failed to set up driver resource.\n"); 9329 goto out_unset_pci_mem_s3; 9330 } 9331 9332 /* Initialize and populate the iocb list per host */ 9333 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 9334 if (error) { 9335 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9336 "1405 Failed to initialize iocb list.\n"); 9337 goto out_unset_driver_resource_s3; 9338 } 9339 9340 /* Set up common device driver resources */ 9341 error = lpfc_setup_driver_resource_phase2(phba); 9342 if (error) { 9343 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9344 "1406 Failed to set up driver resource.\n"); 9345 goto out_free_iocb_list; 9346 } 9347 9348 /* Get the default values for Model Name and Description */ 9349 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 9350 9351 /* Create SCSI host to the physical port */ 9352 error = lpfc_create_shost(phba); 9353 if (error) { 9354 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9355 "1407 Failed to create scsi host.\n"); 9356 goto out_unset_driver_resource; 9357 } 9358 9359 /* Configure sysfs attributes */ 9360 vport = phba->pport; 9361 error = lpfc_alloc_sysfs_attr(vport); 9362 if (error) { 9363 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9364 "1476 Failed to allocate sysfs attr\n"); 9365 goto out_destroy_shost; 9366 } 9367 9368 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 9369 /* Now, trying to enable interrupt and bring up the device */ 9370 cfg_mode = phba->cfg_use_msi; 9371 while (true) { 9372 /* Put device to a known state before enabling interrupt */ 9373 lpfc_stop_port(phba); 9374 /* Configure and enable interrupt */ 9375 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 9376 if (intr_mode == LPFC_INTR_ERROR) { 9377 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9378 "0431 Failed to enable interrupt.\n"); 9379 error = -ENODEV; 9380 goto out_free_sysfs_attr; 9381 } 9382 /* SLI-3 HBA setup */ 9383 if (lpfc_sli_hba_setup(phba)) { 9384 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9385 "1477 Failed to set up hba\n"); 
9386             error = -ENODEV;
9387             goto out_remove_device;
9388         }
9389
9390         /* Wait 50ms for the interrupts of previous mailbox commands */
9391         msleep(50);
9392         /* Check active interrupts on message signaled interrupts */
9393         if (intr_mode == 0 ||
9394             phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
9395             /* Log the current active interrupt mode */
9396             phba->intr_mode = intr_mode;
9397             lpfc_log_intr_mode(phba, intr_mode);
9398             break;
9399         } else {
9400             lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9401                     "0447 Configure interrupt mode (%d) "
9402                     "failed active interrupt test.\n",
9403                     intr_mode);
9404             /* Disable the current interrupt mode */
9405             lpfc_sli_disable_intr(phba);
9406             /* Try next level of interrupt mode */
9407             cfg_mode = --intr_mode;
9408         }
9409     }
9410
9411     /* Perform post initialization setup */
9412     lpfc_post_init_setup(phba);
9413
9414     /* Check if there are static vports to be created. */
9415     lpfc_create_static_vport(phba);
9416
9417     return 0;
9418
9419 out_remove_device:
9420     lpfc_unset_hba(phba);
9421 out_free_sysfs_attr:
9422     lpfc_free_sysfs_attr(vport);
9423 out_destroy_shost:
9424     lpfc_destroy_shost(phba);
9425 out_unset_driver_resource:
9426     lpfc_unset_driver_resource_phase2(phba);
9427 out_free_iocb_list:
9428     lpfc_free_iocb_list(phba);
9429 out_unset_driver_resource_s3:
9430     lpfc_sli_driver_resource_unset(phba);
9431 out_unset_pci_mem_s3:
9432     lpfc_sli_pci_mem_unset(phba);
9433 out_disable_pci_dev:
9434     lpfc_disable_pci_dev(phba);
9435     if (shost)
9436         scsi_host_put(shost);
9437 out_free_phba:
9438     lpfc_hba_free(phba);
9439     return error;
9440 }
9441
9442 /**
9443  * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
9444  * @pdev: pointer to PCI device
9445  *
9446  * This routine is to be called to detach a device with SLI-3 interface
9447  * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec
9448  * is removed from the PCI bus, it performs all the necessary cleanup for
9449  * the HBA device to be removed from the PCI subsystem properly.
9450  **/
9451 static void
9452 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
9453 {
9454     struct Scsi_Host *shost = pci_get_drvdata(pdev);
9455     struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
9456     struct lpfc_vport **vports;
9457     struct lpfc_hba *phba = vport->phba;
9458     int i;
9459     int bars = pci_select_bars(pdev, IORESOURCE_MEM);
9460
9461     spin_lock_irq(&phba->hbalock);
9462     vport->load_flag |= FC_UNLOADING;
9463     spin_unlock_irq(&phba->hbalock);
9464
9465     lpfc_free_sysfs_attr(vport);
9466
9467     /* Release all the vports against this physical port */
9468     vports = lpfc_create_vport_work_array(phba);
9469     if (vports != NULL)
9470         for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
9471             if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
9472                 continue;
9473             fc_vport_terminate(vports[i]->fc_vport);
9474         }
9475     lpfc_destroy_vport_work_array(phba, vports);
9476
9477     /* Remove FC host and then SCSI host with the physical port */
9478     fc_remove_host(shost);
9479     scsi_remove_host(shost);
9480     lpfc_cleanup(vport);
9481
9482     /*
9483      * Bring down the SLI Layer. This step disables all interrupts,
9484      * clears the rings, discards all mailbox commands, and resets
9485      * the HBA.
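     * The teardown below runs in this order: lpfc_sli_hba_down() (which
     * also leaves HBA interrupts disabled), kthread_stop() on the worker
     * thread, then lpfc_sli_brdrestart() for the final txcmplq cleanup
     * and board reset.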
9486      */
9487
9488     /* HBA interrupt will be disabled after this call */
9489     lpfc_sli_hba_down(phba);
9490     /* Stop kthread signal shall trigger work_done one more time */
9491     kthread_stop(phba->worker_thread);
9492     /* Final cleanup of txcmplq and reset the HBA */
9493     lpfc_sli_brdrestart(phba);
9494
9495     kfree(phba->vpi_bmask);
9496     kfree(phba->vpi_ids);
9497
9498     lpfc_stop_hba_timers(phba);
9499     spin_lock_irq(&phba->hbalock);
9500     list_del_init(&vport->listentry);
9501     spin_unlock_irq(&phba->hbalock);
9502
9503     lpfc_debugfs_terminate(vport);
9504
9505     /* Disable SR-IOV if enabled */
9506     if (phba->cfg_sriov_nr_virtfn)
9507         pci_disable_sriov(pdev);
9508
9509     /* Disable interrupt */
9510     lpfc_sli_disable_intr(phba);
9511
9512     scsi_host_put(shost);
9513
9514     /*
9515      * Call scsi_free before mem_free since scsi bufs are released to their
9516      * corresponding pools here.
9517      */
9518     lpfc_scsi_free(phba);
9519     lpfc_mem_free_all(phba);
9520
9521     dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
9522               phba->hbqslimp.virt, phba->hbqslimp.phys);
9523
9524     /* Free resources associated with SLI2 interface */
9525     dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9526               phba->slim2p.virt, phba->slim2p.phys);
9527
9528     /* unmap adapter SLIM and Control Registers */
9529     iounmap(phba->ctrl_regs_memmap_p);
9530     iounmap(phba->slim_memmap_p);
9531
9532     lpfc_hba_free(phba);
9533
9534     pci_release_selected_regions(pdev, bars);
9535     pci_disable_device(pdev);
9536 }
9537
9538 /**
9539  * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
9540  * @pdev: pointer to PCI device
9541  * @msg: power management message
9542  *
9543  * This routine is to be called from the kernel's PCI subsystem to support
9544  * system Power Management (PM) to device with SLI-3 interface spec. When
9545  * PM invokes this method, it quiesces the device by stopping the driver's
9546  * worker thread for the device, turning off the device's interrupt and DMA,
9547  * and bringing the device offline. Note that because the driver implements
9548  * only the minimum PM requirements for a power-aware driver -- every PM
9549  * message (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() method is
9550  * treated as SUSPEND, and the driver fully reinitializes its device during
9551  * the resume() method call -- the driver sets the device to PCI_D3hot state
9552  * in PCI config space instead of setting it according to the @msg provided
9553  * by the PM.
9554  *
9555  * Return code
9556  *     0 - driver suspended the device
9557  *     Error otherwise
9558  **/
9559 static int
9560 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
9561 {
9562     struct Scsi_Host *shost = pci_get_drvdata(pdev);
9563     struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9564
9565     lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9566             "0473 PCI device Power Management suspend.\n");
9567
9568     /* Bring down the device */
9569     lpfc_offline_prep(phba, LPFC_MBX_WAIT);
9570     lpfc_offline(phba);
9571     kthread_stop(phba->worker_thread);
9572
9573     /* Disable interrupt from device */
9574     lpfc_sli_disable_intr(phba);
9575
9576     /* Save device state to PCI config space */
9577     pci_save_state(pdev);
9578     pci_set_power_state(pdev, PCI_D3hot);
9579
9580     return 0;
9581 }
9582
9583 /**
9584  * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
9585  * @pdev: pointer to PCI device
9586  *
9587  * This routine is to be called from the kernel's PCI subsystem to support
9588  * system Power Management (PM) to device with SLI-3 interface spec. When PM
9589  * invokes this method, it restores the device's PCI config space state and
9590  * fully reinitializes the device and brings it online. Note that because
9591  * the driver implements only the minimum PM requirements for a power-aware
9592  * driver -- every PM message (SUSPEND, HIBERNATE, FREEZE) passed to the
9593  * suspend() method is treated as SUSPEND, and the driver fully reinitializes
9594  * its device during the resume() method call -- the device is set to PCI_D0
9595  * directly in PCI config space before its state is restored.
9596  *
9597  * Return code
9598  *     0 - driver resumed the device
9599  *     Error otherwise
9600  **/
9601 static int
9602 lpfc_pci_resume_one_s3(struct pci_dev *pdev)
9603 {
9604     struct Scsi_Host *shost = pci_get_drvdata(pdev);
9605     struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9606     uint32_t intr_mode;
9607     int error;
9608
9609     lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9610             "0452 PCI device Power Management resume.\n");
9611
9612     /* Restore device state from PCI config space */
9613     pci_set_power_state(pdev, PCI_D0);
9614     pci_restore_state(pdev);
9615
9616     /*
9617      * The newer kernel behavior of pci_restore_state() clears the
9618      * device's saved_state flag, so the restored state must be saved
9619      * again.
9620      */
9621     pci_save_state(pdev);
9622
9623     if (pdev->is_busmaster)
9624         pci_set_master(pdev);
9625
9626     /* Startup the kernel thread for this host adapter. */
9627     phba->worker_thread = kthread_run(lpfc_do_work, phba,
9628                     "lpfc_worker_%d", phba->brd_no);
9629     if (IS_ERR(phba->worker_thread)) {
9630         error = PTR_ERR(phba->worker_thread);
9631         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9632                 "0434 PM resume failed to start worker "
9633                 "thread: error=x%x.\n", error);
9634         return error;
9635     }
9636
9637     /* Configure and enable interrupt */
9638     intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
9639     if (intr_mode == LPFC_INTR_ERROR) {
9640         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9641                 "0430 PM resume Failed to enable interrupt\n");
9642         return -EIO;
9643     } else
9644         phba->intr_mode = intr_mode;
9645
9646     /* Restart HBA and bring it online */
9647     lpfc_sli_brdrestart(phba);
9648     lpfc_online(phba);
9649
9650     /* Log the current active interrupt mode */
9651     lpfc_log_intr_mode(phba, phba->intr_mode);
9652
9653     return 0;
9654 }
9655
9656 /**
9657  * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
9658  * @phba: pointer to lpfc hba data structure.
9659  *
9660  * This routine is called to prepare the SLI3 device for PCI slot recovery.
9661  * It aborts all the outstanding SCSI I/Os to the pci device.
9662  **/
9663 static void
9664 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
9665 {
9666     struct lpfc_sli *psli = &phba->sli;
9667     struct lpfc_sli_ring *pring;
9668
9669     lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9670         "2723 PCI channel I/O abort preparing for recovery\n");
9671
9672     /*
9673      * There may be errored I/Os through HBA, abort all I/Os on txcmplq
9674      * and let the SCSI mid-layer retry them to recover.
9675      */
9676     pring = &psli->ring[psli->fcp_ring];
9677     lpfc_sli_abort_iocb_ring(phba, pring);
9678 }
9679
9680 /**
9681  * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
9682  * @phba: pointer to lpfc hba data structure.
9683  *
9684  * This routine is called to prepare the SLI3 device for PCI slot reset. It
9685  * disables the device interrupt and pci device, and aborts the internal FCP
9686  * pending I/Os.
9687  **/
9688 static void
9689 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
9690 {
9691     lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9692             "2710 PCI channel disable preparing for reset\n");
9693
9694     /* Block any management I/Os to the device */
9695     lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
9696
9697     /* Block all SCSI devices' I/Os on the host */
9698     lpfc_scsi_dev_block(phba);
9699
9700     /* Flush all driver's outstanding SCSI I/Os as we are to reset */
9701     lpfc_sli_flush_fcp_rings(phba);
9702
9703     /* stop all timers */
9704     lpfc_stop_hba_timers(phba);
9705
9706     /* Disable interrupt and pci device */
9707     lpfc_sli_disable_intr(phba);
9708     pci_disable_device(phba->pcidev);
9709 }
9710
9711 /**
9712  * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
9713  * @phba: pointer to lpfc hba data structure.
9714  *
9715  * This routine is called to prepare the SLI3 device for permanently
9716  * disabling the PCI slot. It blocks the SCSI transport layer traffic and
9717  * flushes the FCP pending I/Os.
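 *
 * The three prep routines above back the driver's PCI error (AER)
 * callbacks. A sketch of how such callbacks are typically wired up --
 * the driver's actual table is outside this excerpt, so treat the names
 * as illustrative:
 *
 *   static const struct pci_error_handlers lpfc_err_handler = {
 *           .error_detected = lpfc_io_error_detected,
 *           .slot_reset     = lpfc_io_slot_reset,
 *           .resume         = lpfc_io_resume,
 *   };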
9718 **/ 9719 static void 9720 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 9721 { 9722 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9723 "2711 PCI channel permanent disable for failure\n"); 9724 /* Block all SCSI devices' I/Os on the host */ 9725 lpfc_scsi_dev_block(phba); 9726 9727 /* stop all timers */ 9728 lpfc_stop_hba_timers(phba); 9729 9730 /* Clean up all driver's outstanding SCSI I/Os */ 9731 lpfc_sli_flush_fcp_rings(phba); 9732 } 9733 9734 /** 9735 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 9736 * @pdev: pointer to PCI device. 9737 * @state: the current PCI connection state. 9738 * 9739 * This routine is called from the PCI subsystem for I/O error handling to 9740 * device with SLI-3 interface spec. This function is called by the PCI 9741 * subsystem after a PCI bus error affecting this device has been detected. 9742 * When this function is invoked, it will need to stop all the I/Os and 9743 * interrupt(s) to the device. Once that is done, it will return 9744 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 9745 * as desired. 9746 * 9747 * Return codes 9748 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 9749 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 9750 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 9751 **/ 9752 static pci_ers_result_t 9753 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 9754 { 9755 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9756 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9757 9758 switch (state) { 9759 case pci_channel_io_normal: 9760 /* Non-fatal error, prepare for recovery */ 9761 lpfc_sli_prep_dev_for_recover(phba); 9762 return PCI_ERS_RESULT_CAN_RECOVER; 9763 case pci_channel_io_frozen: 9764 /* Fatal error, prepare for slot reset */ 9765 lpfc_sli_prep_dev_for_reset(phba); 9766 return PCI_ERS_RESULT_NEED_RESET; 9767 case pci_channel_io_perm_failure: 9768 /* Permanent failure, prepare for device down */ 9769 lpfc_sli_prep_dev_for_perm_failure(phba); 9770 return PCI_ERS_RESULT_DISCONNECT; 9771 default: 9772 /* Unknown state, prepare and request slot reset */ 9773 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9774 "0472 Unknown PCI error state: x%x\n", state); 9775 lpfc_sli_prep_dev_for_reset(phba); 9776 return PCI_ERS_RESULT_NEED_RESET; 9777 } 9778 } 9779 9780 /** 9781 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 9782 * @pdev: pointer to PCI device. 9783 * 9784 * This routine is called from the PCI subsystem for error handling to 9785 * device with SLI-3 interface spec. This is called after PCI bus has been 9786 * reset to restart the PCI card from scratch, as if from a cold-boot. 9787 * During the PCI subsystem error recovery, after driver returns 9788 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 9789 * recovery and then call this routine before calling the .resume method 9790 * to recover the device. This function will initialize the HBA device, 9791 * enable the interrupt, but it will just put the HBA to offline state 9792 * without passing any I/O traffic. 
9793 * 9794 * Return codes 9795 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 9796 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 9797 */ 9798 static pci_ers_result_t 9799 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 9800 { 9801 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9802 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9803 struct lpfc_sli *psli = &phba->sli; 9804 uint32_t intr_mode; 9805 9806 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 9807 if (pci_enable_device_mem(pdev)) { 9808 printk(KERN_ERR "lpfc: Cannot re-enable " 9809 "PCI device after reset.\n"); 9810 return PCI_ERS_RESULT_DISCONNECT; 9811 } 9812 9813 pci_restore_state(pdev); 9814 9815 /* 9816 * As the new kernel behavior of pci_restore_state() API call clears 9817 * device saved_state flag, need to save the restored state again. 9818 */ 9819 pci_save_state(pdev); 9820 9821 if (pdev->is_busmaster) 9822 pci_set_master(pdev); 9823 9824 spin_lock_irq(&phba->hbalock); 9825 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 9826 spin_unlock_irq(&phba->hbalock); 9827 9828 /* Configure and enable interrupt */ 9829 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 9830 if (intr_mode == LPFC_INTR_ERROR) { 9831 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9832 "0427 Cannot re-enable interrupt after " 9833 "slot reset.\n"); 9834 return PCI_ERS_RESULT_DISCONNECT; 9835 } else 9836 phba->intr_mode = intr_mode; 9837 9838 /* Take device offline, it will perform cleanup */ 9839 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 9840 lpfc_offline(phba); 9841 lpfc_sli_brdrestart(phba); 9842 9843 /* Log the current active interrupt mode */ 9844 lpfc_log_intr_mode(phba, phba->intr_mode); 9845 9846 return PCI_ERS_RESULT_RECOVERED; 9847 } 9848 9849 /** 9850 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 9851 * @pdev: pointer to PCI device 9852 * 9853 * This routine is called from the PCI subsystem for error handling to device 9854 * with SLI-3 interface spec. It is called when kernel error recovery tells 9855 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 9856 * error recovery. After this call, traffic can start to flow from this device 9857 * again. 9858 */ 9859 static void 9860 lpfc_io_resume_s3(struct pci_dev *pdev) 9861 { 9862 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9863 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9864 9865 /* Bring device online, it will be no-op for non-fatal error resume */ 9866 lpfc_online(phba); 9867 9868 /* Clean up Advanced Error Reporting (AER) if needed */ 9869 if (phba->hba_flag & HBA_AER_ENABLED) 9870 pci_cleanup_aer_uncorrect_error_status(pdev); 9871 } 9872 9873 /** 9874 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 9875 * @phba: pointer to lpfc hba data structure. 
9876 * 9877 * returns the number of ELS/CT IOCBs to reserve 9878 **/ 9879 int 9880 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 9881 { 9882 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 9883 9884 if (phba->sli_rev == LPFC_SLI_REV4) { 9885 if (max_xri <= 100) 9886 return 10; 9887 else if (max_xri <= 256) 9888 return 25; 9889 else if (max_xri <= 512) 9890 return 50; 9891 else if (max_xri <= 1024) 9892 return 100; 9893 else if (max_xri <= 1536) 9894 return 150; 9895 else if (max_xri <= 2048) 9896 return 200; 9897 else 9898 return 250; 9899 } else 9900 return 0; 9901 } 9902 9903 /** 9904 * lpfc_write_firmware - attempt to write a firmware image to the port 9905 * @fw: pointer to firmware image returned from request_firmware. 9906 * @phba: pointer to lpfc hba data structure. 9907 * 9908 **/ 9909 static void 9910 lpfc_write_firmware(const struct firmware *fw, void *context) 9911 { 9912 struct lpfc_hba *phba = (struct lpfc_hba *)context; 9913 char fwrev[FW_REV_STR_SIZE]; 9914 struct lpfc_grp_hdr *image; 9915 struct list_head dma_buffer_list; 9916 int i, rc = 0; 9917 struct lpfc_dmabuf *dmabuf, *next; 9918 uint32_t offset = 0, temp_offset = 0; 9919 9920 /* It can be null in no-wait mode, sanity check */ 9921 if (!fw) { 9922 rc = -ENXIO; 9923 goto out; 9924 } 9925 image = (struct lpfc_grp_hdr *)fw->data; 9926 9927 INIT_LIST_HEAD(&dma_buffer_list); 9928 if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) || 9929 (bf_get_be32(lpfc_grp_hdr_file_type, image) != 9930 LPFC_FILE_TYPE_GROUP) || 9931 (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) || 9932 (be32_to_cpu(image->size) != fw->size)) { 9933 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9934 "3022 Invalid FW image found. " 9935 "Magic:%x Type:%x ID:%x\n", 9936 be32_to_cpu(image->magic_number), 9937 bf_get_be32(lpfc_grp_hdr_file_type, image), 9938 bf_get_be32(lpfc_grp_hdr_id, image)); 9939 rc = -EINVAL; 9940 goto release_out; 9941 } 9942 lpfc_decode_firmware_rev(phba, fwrev, 1); 9943 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 9944 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9945 "3023 Updating Firmware, Current Version:%s " 9946 "New Version:%s\n", 9947 fwrev, image->revision); 9948 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 9949 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 9950 GFP_KERNEL); 9951 if (!dmabuf) { 9952 rc = -ENOMEM; 9953 goto release_out; 9954 } 9955 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 9956 SLI4_PAGE_SIZE, 9957 &dmabuf->phys, 9958 GFP_KERNEL); 9959 if (!dmabuf->virt) { 9960 kfree(dmabuf); 9961 rc = -ENOMEM; 9962 goto release_out; 9963 } 9964 list_add_tail(&dmabuf->list, &dma_buffer_list); 9965 } 9966 while (offset < fw->size) { 9967 temp_offset = offset; 9968 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 9969 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { 9970 memcpy(dmabuf->virt, 9971 fw->data + temp_offset, 9972 fw->size - temp_offset); 9973 temp_offset = fw->size; 9974 break; 9975 } 9976 memcpy(dmabuf->virt, fw->data + temp_offset, 9977 SLI4_PAGE_SIZE); 9978 temp_offset += SLI4_PAGE_SIZE; 9979 } 9980 rc = lpfc_wr_object(phba, &dma_buffer_list, 9981 (fw->size - offset), &offset); 9982 if (rc) 9983 goto release_out; 9984 } 9985 rc = offset; 9986 } 9987 9988 release_out: 9989 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { 9990 list_del(&dmabuf->list); 9991 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, 9992 dmabuf->virt, dmabuf->phys); 9993 kfree(dmabuf); 9994 } 9995 release_firmware(fw); 9996 out: 9997 
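    /* rc: 0 when the image already matches the running firmware, the
     * byte offset written by a successful update, or a negative errno
     * (-ENXIO when no image was provided in no-wait mode).
     */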
    lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9998             "3024 Firmware update done: %d.\n", rc);
9999     return;
10000 }
10001
10002 /**
10003  * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
10004  * @phba: pointer to lpfc hba data structure.
 * @fw_upgrade: INT_FW_UPGRADE for an asynchronous (hotplug) request or
 *     RUN_FW_UPGRADE for a synchronous request.
10005  *
10006  * This routine is called to perform the Linux generic firmware upgrade on
10007  * a device that supports this feature.
10008  **/
10009 int
10010 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
10011 {
10012     uint8_t file_name[ELX_MODEL_NAME_SIZE];
10013     int ret;
10014     const struct firmware *fw;
10015
10016     /* Only supported on SLI4 interface type 2 for now */
10017     if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
10018         LPFC_SLI_INTF_IF_TYPE_2)
10019         return -EPERM;
10020
10021     snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
10022
10023     if (fw_upgrade == INT_FW_UPGRADE) {
10024         ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
10025                     file_name, &phba->pcidev->dev,
10026                     GFP_KERNEL, (void *)phba,
10027                     lpfc_write_firmware);
10028     } else if (fw_upgrade == RUN_FW_UPGRADE) {
10029         ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
10030         if (!ret)
10031             lpfc_write_firmware(fw, (void *)phba);
10032     } else {
10033         ret = -EINVAL;
10034     }
10035
10036     return ret;
10037 }
10038
10039 /**
10040  * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
10041  * @pdev: pointer to PCI device
10042  * @pid: pointer to PCI device identifier
10043  *
10044  * This routine is called from the kernel's PCI subsystem to attach a device
10045  * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
10046  * presented on the PCI bus, the kernel PCI subsystem looks at the PCI
10047  * device-specific information of the device and driver to see whether the
10048  * driver can support this kind of device. If the match is successful, the
10049  * driver core invokes this routine. If this routine determines it can claim
10050  * the HBA, it does all the initialization that it needs to do to handle the
10051  * HBA properly.
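 *
 * Compared with the SLI-3 probe path, this path sizes the iocb list from
 * cfg_iocb_cnt and, once interrupts are up, trims cfg_fcp_io_channel to a
 * single EQ unless MSI-X was actually granted.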
10052 * 10053 * Return code 10054 * 0 - driver can claim the device 10055 * negative value - driver can not claim the device 10056 **/ 10057 static int 10058 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 10059 { 10060 struct lpfc_hba *phba; 10061 struct lpfc_vport *vport = NULL; 10062 struct Scsi_Host *shost = NULL; 10063 int error, ret; 10064 uint32_t cfg_mode, intr_mode; 10065 int adjusted_fcp_io_channel; 10066 10067 /* Allocate memory for HBA structure */ 10068 phba = lpfc_hba_alloc(pdev); 10069 if (!phba) 10070 return -ENOMEM; 10071 10072 /* Perform generic PCI device enabling operation */ 10073 error = lpfc_enable_pci_dev(phba); 10074 if (error) 10075 goto out_free_phba; 10076 10077 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 10078 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 10079 if (error) 10080 goto out_disable_pci_dev; 10081 10082 /* Set up SLI-4 specific device PCI memory space */ 10083 error = lpfc_sli4_pci_mem_setup(phba); 10084 if (error) { 10085 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10086 "1410 Failed to set up pci memory space.\n"); 10087 goto out_disable_pci_dev; 10088 } 10089 10090 /* Set up phase-1 common device driver resources */ 10091 error = lpfc_setup_driver_resource_phase1(phba); 10092 if (error) { 10093 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10094 "1411 Failed to set up driver resource.\n"); 10095 goto out_unset_pci_mem_s4; 10096 } 10097 10098 /* Set up SLI-4 Specific device driver resources */ 10099 error = lpfc_sli4_driver_resource_setup(phba); 10100 if (error) { 10101 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10102 "1412 Failed to set up driver resource.\n"); 10103 goto out_unset_pci_mem_s4; 10104 } 10105 10106 /* Initialize and populate the iocb list per host */ 10107 10108 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10109 "2821 initialize iocb list %d.\n", 10110 phba->cfg_iocb_cnt*1024); 10111 error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024); 10112 10113 if (error) { 10114 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10115 "1413 Failed to initialize iocb list.\n"); 10116 goto out_unset_driver_resource_s4; 10117 } 10118 10119 INIT_LIST_HEAD(&phba->active_rrq_list); 10120 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 10121 10122 /* Set up common device driver resources */ 10123 error = lpfc_setup_driver_resource_phase2(phba); 10124 if (error) { 10125 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10126 "1414 Failed to set up driver resource.\n"); 10127 goto out_free_iocb_list; 10128 } 10129 10130 /* Get the default values for Model Name and Description */ 10131 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 10132 10133 /* Create SCSI host to the physical port */ 10134 error = lpfc_create_shost(phba); 10135 if (error) { 10136 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10137 "1415 Failed to create scsi host.\n"); 10138 goto out_unset_driver_resource; 10139 } 10140 10141 /* Configure sysfs attributes */ 10142 vport = phba->pport; 10143 error = lpfc_alloc_sysfs_attr(vport); 10144 if (error) { 10145 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10146 "1416 Failed to allocate sysfs attr\n"); 10147 goto out_destroy_shost; 10148 } 10149 10150 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 10151 /* Now, trying to enable interrupt and bring up the device */ 10152 cfg_mode = phba->cfg_use_msi; 10153 10154 /* Put device to a known state before enabling interrupt */ 10155 lpfc_stop_port(phba); 10156 /* Configure and enable interrupt */ 10157 intr_mode = 
lpfc_sli4_enable_intr(phba, cfg_mode); 10158 if (intr_mode == LPFC_INTR_ERROR) { 10159 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10160 "0426 Failed to enable interrupt.\n"); 10161 error = -ENODEV; 10162 goto out_free_sysfs_attr; 10163 } 10164 /* Default to single EQ for non-MSI-X */ 10165 if (phba->intr_type != MSIX) 10166 adjusted_fcp_io_channel = 1; 10167 else 10168 adjusted_fcp_io_channel = phba->cfg_fcp_io_channel; 10169 phba->cfg_fcp_io_channel = adjusted_fcp_io_channel; 10170 /* Set up SLI-4 HBA */ 10171 if (lpfc_sli4_hba_setup(phba)) { 10172 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10173 "1421 Failed to set up hba\n"); 10174 error = -ENODEV; 10175 goto out_disable_intr; 10176 } 10177 10178 /* Log the current active interrupt mode */ 10179 phba->intr_mode = intr_mode; 10180 lpfc_log_intr_mode(phba, intr_mode); 10181 10182 /* Perform post initialization setup */ 10183 lpfc_post_init_setup(phba); 10184 10185 /* check for firmware upgrade or downgrade */ 10186 if (phba->cfg_request_firmware_upgrade) 10187 ret = lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); 10188 10189 /* Check if there are static vports to be created. */ 10190 lpfc_create_static_vport(phba); 10191 return 0; 10192 10193 out_disable_intr: 10194 lpfc_sli4_disable_intr(phba); 10195 out_free_sysfs_attr: 10196 lpfc_free_sysfs_attr(vport); 10197 out_destroy_shost: 10198 lpfc_destroy_shost(phba); 10199 out_unset_driver_resource: 10200 lpfc_unset_driver_resource_phase2(phba); 10201 out_free_iocb_list: 10202 lpfc_free_iocb_list(phba); 10203 out_unset_driver_resource_s4: 10204 lpfc_sli4_driver_resource_unset(phba); 10205 out_unset_pci_mem_s4: 10206 lpfc_sli4_pci_mem_unset(phba); 10207 out_disable_pci_dev: 10208 lpfc_disable_pci_dev(phba); 10209 if (shost) 10210 scsi_host_put(shost); 10211 out_free_phba: 10212 lpfc_hba_free(phba); 10213 return error; 10214 } 10215 10216 /** 10217 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 10218 * @pdev: pointer to PCI device 10219 * 10220 * This routine is called from the kernel's PCI subsystem to device with 10221 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 10222 * removed from PCI bus, it performs all the necessary cleanup for the HBA 10223 * device to be removed from the PCI subsystem properly. 10224 **/ 10225 static void 10226 lpfc_pci_remove_one_s4(struct pci_dev *pdev) 10227 { 10228 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10229 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 10230 struct lpfc_vport **vports; 10231 struct lpfc_hba *phba = vport->phba; 10232 int i; 10233 10234 /* Mark the device unloading flag */ 10235 spin_lock_irq(&phba->hbalock); 10236 vport->load_flag |= FC_UNLOADING; 10237 spin_unlock_irq(&phba->hbalock); 10238 10239 /* Free the HBA sysfs attributes */ 10240 lpfc_free_sysfs_attr(vport); 10241 10242 /* Release all the vports against this physical port */ 10243 vports = lpfc_create_vport_work_array(phba); 10244 if (vports != NULL) 10245 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 10246 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 10247 continue; 10248 fc_vport_terminate(vports[i]->fc_vport); 10249 } 10250 lpfc_destroy_vport_work_array(phba, vports); 10251 10252 /* Remove FC host and then SCSI host with the physical port */ 10253 fc_remove_host(shost); 10254 scsi_remove_host(shost); 10255 10256 /* Perform cleanup on the physical port */ 10257 lpfc_cleanup(vport); 10258 10259 /* 10260 * Bring down the SLI Layer. 
This step disables all interrupts,
10261  * clears the rings, discards all mailbox commands, and resets
10262  * the HBA FCoE function.
10263  */
10264     lpfc_debugfs_terminate(vport);
10265     lpfc_sli4_hba_unset(phba);
10266
10267     spin_lock_irq(&phba->hbalock);
10268     list_del_init(&vport->listentry);
10269     spin_unlock_irq(&phba->hbalock);
10270
10271     /* Perform scsi free before driver resource_unset since scsi
10272      * buffers are released to their corresponding pools here.
10273      */
10274     lpfc_scsi_free(phba);
10275
10276     lpfc_sli4_driver_resource_unset(phba);
10277
10278     /* Unmap adapter Control and Doorbell registers */
10279     lpfc_sli4_pci_mem_unset(phba);
10280
10281     /* Release PCI resources and disable device's PCI function */
10282     scsi_host_put(shost);
10283     lpfc_disable_pci_dev(phba);
10284
10285     /* Finally, free the driver's device data structure */
10286     lpfc_hba_free(phba);
10287
10288     return;
10289 }
10290
10291 /**
10292  * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
10293  * @pdev: pointer to PCI device
10294  * @msg: power management message
10295  *
10296  * This routine is called from the kernel's PCI subsystem to support system
10297  * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
10298  * this method, it quiesces the device by stopping the driver's worker
10299  * thread for the device, turning off the device's interrupt and DMA, and
10300  * bringing the device offline. Note that because the driver implements only
10301  * the minimum PM requirements for a power-aware driver -- every PM message
10302  * (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() method is treated
10303  * as SUSPEND, and the driver fully reinitializes its device during the
10304  * resume() method call -- the driver sets the device to PCI_D3hot state
10305  * in PCI config space instead of setting it according to the @msg provided
10306  * by the PM.
10307  *
10308  * Return code
10309  *     0 - driver suspended the device
10310  *     Error otherwise
10311  **/
10312 static int
10313 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
10314 {
10315     struct Scsi_Host *shost = pci_get_drvdata(pdev);
10316     struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10317
10318     lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10319             "2843 PCI device Power Management suspend.\n");
10320
10321     /* Bring down the device */
10322     lpfc_offline_prep(phba, LPFC_MBX_WAIT);
10323     lpfc_offline(phba);
10324     kthread_stop(phba->worker_thread);
10325
10326     /* Disable interrupt from device */
10327     lpfc_sli4_disable_intr(phba);
10328     lpfc_sli4_queue_destroy(phba);
10329
10330     /* Save device state to PCI config space */
10331     pci_save_state(pdev);
10332     pci_set_power_state(pdev, PCI_D3hot);
10333
10334     return 0;
10335 }
10336
10337 /**
10338  * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
10339  * @pdev: pointer to PCI device
10340  *
10341  * This routine is called from the kernel's PCI subsystem to support system
10342  * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
10343  * this method, it restores the device's PCI config space state and fully
10344  * reinitializes the device and brings it online.
 * Note that because the driver
10345  * implements only the minimum PM requirements for a power-aware driver --
10346  * every PM message (SUSPEND, HIBERNATE, FREEZE) passed to the suspend()
10347  * method is treated as SUSPEND, and the driver fully reinitializes its
10348  * device during the resume() method call -- the device is set to PCI_D0
10349  * directly in PCI config space before its state is restored.
10350  *
10351  *
10352  * Return code
10353  *     0 - driver resumed the device
10354  *     Error otherwise
10355  **/
10356 static int
10357 lpfc_pci_resume_one_s4(struct pci_dev *pdev)
10358 {
10359     struct Scsi_Host *shost = pci_get_drvdata(pdev);
10360     struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10361     uint32_t intr_mode;
10362     int error;
10363
10364     lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10365             "0292 PCI device Power Management resume.\n");
10366
10367     /* Restore device state from PCI config space */
10368     pci_set_power_state(pdev, PCI_D0);
10369     pci_restore_state(pdev);
10370
10371     /*
10372      * The newer kernel behavior of pci_restore_state() clears the
10373      * device's saved_state flag, so the restored state must be saved again.
10374      */
10375     pci_save_state(pdev);
10376
10377     if (pdev->is_busmaster)
10378         pci_set_master(pdev);
10379
10380     /* Startup the kernel thread for this host adapter. */
10381     phba->worker_thread = kthread_run(lpfc_do_work, phba,
10382                     "lpfc_worker_%d", phba->brd_no);
10383     if (IS_ERR(phba->worker_thread)) {
10384         error = PTR_ERR(phba->worker_thread);
10385         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10386                 "0293 PM resume failed to start worker "
10387                 "thread: error=x%x.\n", error);
10388         return error;
10389     }
10390
10391     /* Configure and enable interrupt */
10392     intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
10393     if (intr_mode == LPFC_INTR_ERROR) {
10394         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10395                 "0294 PM resume Failed to enable interrupt\n");
10396         return -EIO;
10397     } else
10398         phba->intr_mode = intr_mode;
10399
10400     /* Restart HBA and bring it online */
10401     lpfc_sli_brdrestart(phba);
10402     lpfc_online(phba);
10403
10404     /* Log the current active interrupt mode */
10405     lpfc_log_intr_mode(phba, phba->intr_mode);
10406
10407     return 0;
10408 }
10409
10410 /**
10411  * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
10412  * @phba: pointer to lpfc hba data structure.
10413  *
10414  * This routine is called to prepare the SLI4 device for PCI slot recovery.
10415  * It aborts all the outstanding SCSI I/Os to the pci device.
10416  **/
10417 static void
10418 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
10419 {
10420     struct lpfc_sli *psli = &phba->sli;
10421     struct lpfc_sli_ring *pring;
10422
10423     lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10424             "2828 PCI channel I/O abort preparing for recovery\n");
10425     /*
10426      * There may be errored I/Os through HBA, abort all I/Os on txcmplq
10427      * and let the SCSI mid-layer retry them to recover.
10428      */
10429     pring = &psli->ring[psli->fcp_ring];
10430     lpfc_sli_abort_iocb_ring(phba, pring);
10431 }
10432
10433 /**
10434  * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
10435  * @phba: pointer to lpfc hba data structure.
10436  *
10437  * This routine is called to prepare the SLI4 device for PCI slot reset. It
10438  * disables the device interrupt and pci device, and aborts the internal FCP
10439  * pending I/Os.
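 * The shutdown order below is deliberate: block management I/O, block
 * the SCSI layer, flush the outstanding FCP rings, stop the timers, and
 * only then disable interrupts, destroy the queues, and disable the PCI
 * device.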
10440 **/ 10441 static void 10442 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 10443 { 10444 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10445 "2826 PCI channel disable preparing for reset\n"); 10446 10447 /* Block any management I/Os to the device */ 10448 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); 10449 10450 /* Block all SCSI devices' I/Os on the host */ 10451 lpfc_scsi_dev_block(phba); 10452 10453 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 10454 lpfc_sli_flush_fcp_rings(phba); 10455 10456 /* stop all timers */ 10457 lpfc_stop_hba_timers(phba); 10458 10459 /* Disable interrupt and pci device */ 10460 lpfc_sli4_disable_intr(phba); 10461 lpfc_sli4_queue_destroy(phba); 10462 pci_disable_device(phba->pcidev); 10463 } 10464 10465 /** 10466 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 10467 * @phba: pointer to lpfc hba data structure. 10468 * 10469 * This routine is called to prepare the SLI4 device for PCI slot permanently 10470 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 10471 * pending I/Os. 10472 **/ 10473 static void 10474 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 10475 { 10476 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10477 "2827 PCI channel permanent disable for failure\n"); 10478 10479 /* Block all SCSI devices' I/Os on the host */ 10480 lpfc_scsi_dev_block(phba); 10481 10482 /* stop all timers */ 10483 lpfc_stop_hba_timers(phba); 10484 10485 /* Clean up all driver's outstanding SCSI I/Os */ 10486 lpfc_sli_flush_fcp_rings(phba); 10487 } 10488 10489 /** 10490 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 10491 * @pdev: pointer to PCI device. 10492 * @state: the current PCI connection state. 10493 * 10494 * This routine is called from the PCI subsystem for error handling to device 10495 * with SLI-4 interface spec. This function is called by the PCI subsystem 10496 * after a PCI bus error affecting this device has been detected. When this 10497 * function is invoked, it will need to stop all the I/Os and interrupt(s) 10498 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 10499 * for the PCI subsystem to perform proper recovery as desired. 
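 *
 * The mapping implemented below (mirroring the SLI-3 handler) is:
 *   pci_channel_io_normal       -> PCI_ERS_RESULT_CAN_RECOVER
 *   pci_channel_io_frozen       -> PCI_ERS_RESULT_NEED_RESET
 *   pci_channel_io_perm_failure -> PCI_ERS_RESULT_DISCONNECT
 *   any other state             -> PCI_ERS_RESULT_NEED_RESET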
/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with SLI-4 interface spec. It is invoked by the PCI subsystem
 * after a PCI bus error affecting this device has been detected. It stops
 * the I/Os and interrupt(s) to the device as appropriate for the reported
 * error state, then returns the result the PCI subsystem needs to perform
 * proper recovery.
 *
 * Return codes
 * 	PCI_ERS_RESULT_CAN_RECOVER - non-fatal error, recovery can proceed
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli4_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli4_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2825 Unknown PCI error state: x%x\n", state);
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
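/*
 * The slot_reset and io_resume methods below cooperate through the
 * LPFC_SLI_ACTIVE flag: slot_reset clears it under hbalock, and
 * io_resume tests it to decide whether the deferred function reset
 * (done through a mailbox command, which needs DMA and therefore
 * cannot run at slot-reset time) must still be performed.  In outline:
 *
 *	// in slot_reset:
 *	spin_lock_irq(&phba->hbalock);
 *	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
 *	spin_unlock_irq(&phba->hbalock);
 *
 *	// in io_resume:
 *	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
 *		... take the device offline, restart and bring online ...
 */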
/**
 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with SLI-4 interface spec. It is called after the PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot. During
 * the PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method to
 * recover the device. This function will reinitialize the HBA device and
 * enable its interrupt, but it will leave the HBA in an offline state
 * without passing any I/O traffic.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with SLI-4 interface spec. It is called when the kernel error
 * recovery tells the lpfc driver that it is OK to resume normal PCI
 * operation after PCI bus error recovery. After this call, traffic can
 * start to flow from this device again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/*
	 * In case of slot reset, as function reset is performed through
	 * mailbox command which needs DMA to be enabled, this operation
	 * has to be moved to the io resume phase. Taking device offline
	 * will perform the necessary cleanup.
	 */
	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}
/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
 * looks at the PCI device-specific information of the device and driver to
 * see if the driver can support this kind of device. If the match is
 * successful, the driver core invokes this routine. This routine dispatches
 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
 * do all the initialization that it needs to do to handle the HBA device
 * properly.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver cannot claim the device
 **/
static int
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	int rc;
	struct lpfc_sli_intf intf;

	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
		return -ENODEV;

	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
		rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);

	return rc;
}

/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from the PCI bus, the driver core invokes this
 * routine. This routine dispatches the action to the proper SLI-3 or SLI-4
 * device remove routine, which will perform all the necessary cleanup for
 * the device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}
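/*
 * The remove/suspend/resume and error-handler entry points above and
 * below all share one dispatch pattern: pick the SLI-3 (LPFC_PCI_DEV_LP)
 * or SLI-4 (LPFC_PCI_DEV_OC) specific routine based on phba->pci_dev_grp,
 * which was decided once at probe time from the SLI_INTF register.  A
 * condensed sketch of the pattern (function names illustrative only):
 *
 *	switch (phba->pci_dev_grp) {
 *	case LPFC_PCI_DEV_LP:	// SLI-3 path
 *		rc = do_something_s3(pdev);
 *		break;
 *	case LPFC_PCI_DEV_OC:	// SLI-4 path
 *		rc = do_something_s4(pdev);
 *		break;
 *	default:	// should not happen after a successful probe
 *		break;
 *	}
 */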
/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(pdev, msg);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(pdev, msg);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after the PCI bus has been reset to restart the PCI
 * card from scratch, as if from a cold-boot. When this routine is invoked,
 * it dispatches the action to the proper SLI-3 or SLI-4 device reset
 * handling routine, which will perform the proper device reset.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when the kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

/**
 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks whether OAS is supported by this adapter. If it is,
 * the configure Flash Optimized Fabric flag is set. Otherwise, the enable
 * OAS flag is cleared and the memory pool created for OAS device data is
 * destroyed.
 **/
void
lpfc_sli4_oas_verify(struct lpfc_hba *phba)
{
	if (!phba->cfg_EnableXLane)
		return;

	if (phba->sli4_hba.pc_sli4_params.oas_supported) {
		phba->cfg_fof = 1;
	} else {
		phba->cfg_EnableXLane = 0;
		if (phba->device_data_mem_pool)
			mempool_destroy(phba->device_data_mem_pool);
		phba->device_data_mem_pool = NULL;
	}

	return;
}
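/*
 * The FOF (Flash Optimized Fabric) queue routines below follow the
 * usual SLI4 queue topology: one event queue (EQ) feeding an OAS
 * completion queue (CQ), which in turn serves an OAS work queue (WQ).
 * Creation must therefore run EQ -> CQ -> WQ, and the error unwinding
 * in lpfc_fof_queue_setup() tears the chain down in reverse.  In
 * outline (arguments elided):
 *
 *	lpfc_eq_create(phba, fof_eq, ...);		// parent first
 *	lpfc_cq_create(phba, oas_cq, fof_eq, ...);	// child of EQ
 *	lpfc_wq_create(phba, oas_wq, oas_cq, ...);	// child of CQ
 *	// on failure: destroy child-to-parent, WQ/CQ before EQ
 */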
/**
 * lpfc_fof_queue_setup - Set up all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the fof queues for the FC HBA
 * operation.
 *
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - No available memory
 **/
int
lpfc_fof_queue_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	int rc;

	rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
	if (rc)
		return -ENOMEM;

	if (phba->cfg_EnableXLane) {
		rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
				    phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
		if (rc)
			goto out_oas_cq;

		rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
				    phba->sli4_hba.oas_cq, LPFC_FCP);
		if (rc)
			goto out_oas_wq;

		phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING];
		phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING];
	}

	return 0;

out_oas_wq:
	/* Only reachable from within the cfg_EnableXLane block above */
	lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
out_oas_cq:
	lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
	return rc;
}

/**
 * lpfc_fof_queue_create - Create all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the fof queues for the FC HBA
 * operation. For each SLI4 queue type, parameters such as the queue entry
 * count (queue depth) should be taken from the corresponding module
 * parameter; for now, constant values are used as placeholders.
 *
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - No available memory
 * 	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_fof_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;

	/* Create FOF EQ */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
				      phba->sli4_hba.eq_ecount);
	if (!qdesc)
		goto out_error;

	phba->sli4_hba.fof_eq = qdesc;

	if (phba->cfg_EnableXLane) {
		/* Create OAS CQ */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount);
		if (!qdesc)
			goto out_error;

		phba->sli4_hba.oas_cq = qdesc;

		/* Create OAS WQ */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount);
		if (!qdesc)
			goto out_error;

		phba->sli4_hba.oas_wq = qdesc;
	}
	return 0;

out_error:
	lpfc_fof_queue_destroy(phba);
	return -ENOMEM;
}
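/*
 * Note the split above: lpfc_fof_queue_create() only allocates host
 * memory for the queue descriptors, while lpfc_fof_queue_setup() posts
 * them to the firmware via mailbox commands (lpfc_eq_create() and
 * friends).  lpfc_fof_queue_destroy() below is the inverse of _create,
 * freeing the host-side descriptors.  The expected life cycle is:
 *
 *	lpfc_fof_queue_create(phba);	// allocate descriptors
 *	lpfc_fof_queue_setup(phba);	// register queues with firmware
 *	...
 *	lpfc_fof_queue_destroy(phba);	// free descriptors at teardown
 */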
/**
 * lpfc_fof_queue_destroy - Destroy all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues allocated for the
 * FC HBA operation.
 *
 * Return codes
 * 	0 - successful
 **/
int
lpfc_fof_queue_destroy(struct lpfc_hba *phba)
{
	/* Release FOF Event queue */
	if (phba->sli4_hba.fof_eq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
		phba->sli4_hba.fof_eq = NULL;
	}

	/* Release OAS Completion queue */
	if (phba->sli4_hba.oas_cq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
		phba->sli4_hba.oas_cq = NULL;
	}

	/* Release OAS Work queue */
	if (phba->sli4_hba.oas_wq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
		phba->sli4_hba.oas_wq = NULL;
	}
	return 0;
}

static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static const struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= lpfc_pci_remove_one,
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler	= &lpfc_err_handler,
};

static const struct file_operations lpfc_mgmt_fop = {
	.owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};
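/*
 * The "lpfcmgmt" misc device above exists so management applications
 * have a stable node through which to find the driver; it carries no
 * file operations beyond the owner field.  With MISC_DYNAMIC_MINOR it
 * appears as /dev/lpfcmgmt once the module is loaded, so a userspace
 * presence check could be as simple as (illustrative only):
 *
 *	int fd = open("/dev/lpfcmgmt", O_RDONLY);
 *	if (fd >= 0)
 *		close(fd);	// lpfc driver is loaded
 */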
/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is invoked when the lpfc module is loaded into the kernel.
 * The special kernel macro module_init() is used to indicate the role of
 * this routine to the kernel as the lpfc module entry point.
 *
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - FC attach transport failed
 * 	all others - failed
 **/
static int __init
lpfc_init(void)
{
	int cpu;
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	error = misc_register(&lpfc_mgmt_dev);
	if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
			"misc_register returned with status %d\n", error);

	if (lpfc_enable_npiv) {
		lpfc_transport_functions.vport_create = lpfc_vport_create;
		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	}
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	if (lpfc_enable_npiv) {
		lpfc_vport_transport_template =
			fc_attach_transport(&lpfc_vport_transport_functions);
		if (lpfc_vport_transport_template == NULL) {
			fc_release_transport(lpfc_transport_template);
			return -ENOMEM;
		}
	}

	/* Initialize in case vector mapping is needed */
	lpfc_used_cpu = NULL;
	lpfc_present_cpu = 0;
	for_each_present_cpu(cpu)
		lpfc_present_cpu++;

	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		if (lpfc_enable_npiv)
			fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}

/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as the lpfc module exit point.
 **/
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	if (lpfc_enable_npiv)
		fc_release_transport(lpfc_vport_transport_template);
	if (_dump_buf_data) {
		printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
				"_dump_buf_data at 0x%p\n",
				(1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data,
			   _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
				"_dump_buf_dif at 0x%p\n",
				(1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
	kfree(lpfc_used_cpu);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);