/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* Buffers used to capture diagnostic dumps of data and DIF payloads */
char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

/* Used when mapping IRQ vectors in a driver centric manner */
uint16_t *lpfc_used_cpu;
uint32_t lpfc_present_cpu;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			/* Convert the license key to big-endian once */
			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char *)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
			sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}


	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's mailbox command that
 * configures asynchronous event support on the device. If the mailbox
 * command returns successfully, it sets the internal async event support
 * flag to 1; otherwise, it clears the flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * the wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the Option ROM version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option ROM version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *	None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
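	/*
	 * Each of the 6 IEEE bytes below yields two serial number
	 * characters: nibble values 0-9 map to '0'-'9' (0x30 + j) and
	 * 10-15 map to 'a'-'f' (0x61 + j - 10).
	 */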
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	i = (mb->un.varRdConfig.max_xri + 1);
	if (phba->cfg_hba_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, i);
		phba->cfg_hba_queue_depth = i;
	}

	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
	i = (mb->un.varRdConfig.max_xri >> 3);
	if (phba->pport->cfg_lun_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3360 LUN queue depth changed from %d to %d\n",
				phba->pport->cfg_lun_queue_depth, i);
		phba->pport->cfg_lun_queue_depth = i;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;
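	/*
	 * When the FCP ring (ring 0) is serviced by polling with its
	 * interrupt disabled, keep HC_R0INT_ENA masked so the poll loop
	 * is the only consumer of those completions.
	 */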
	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option ROM version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *	0 - success
 *	Any other value - error
 **/
int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *	0 - success
 *	Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *	0 - success
 *	Any other value - error
 **/
int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 *	rspiocbs which got deferred
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will clean up completed slow path events after the HBA is
 * reset when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}

/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		if (phba->sli_rev >= LPFC_SLI_REV4)
			spin_lock_irq(&pring->ring_lock);
		else
			spin_lock_irq(&phba->hbalock);
		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;

		if (phba->sli_rev >= LPFC_SLI_REV4)
			spin_unlock_irq(&pring->ring_lock);
		else
			spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
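	/*
	 * Lock ordering note: hbalock is taken first with IRQs disabled;
	 * the abts_sgl_list_lock and abts_scsi_buf_list_lock are nested
	 * inside it while the aborted entries are spliced off.
	 */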
	spin_lock_irq(&phba->hbalock);	/* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			 &aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);

	lpfc_sli4_free_sp_events(phba);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
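/*
 * Note: lpfc_hb_timeout above and lpfc_rrq_timeout below run in timer
 * (softirq) context, so they only record state and wake the worker
 * thread; the actual processing happens later in process context.
 */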
/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver sets the heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds and marks
 * the heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions are detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expires with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and the timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time +
			msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
			jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						 jiffies +
						 msecs_to_jiffies(1000 *
						 LPFC_HB_MBOX_INTERVAL));
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
					retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						msecs_to_jiffies(1000 *
						LPFC_HB_MBOX_INTERVAL));
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					"updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				 jiffies +
				 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still "
					"outstanding: last compl time was "
					"%d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/*
	 * Firmware stops when it has triggered an erratt. That could cause
	 * I/Os to be dropped by the firmware. Error out the iocbs (I/Os) on
	 * the txcmplq and let the SCSI layer retry them after
	 * re-establishing link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * the first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it has triggered an erratt with
		 * HS_FFER6.
		 * That could cause I/Os to be dropped by the firmware.
		 * Error out the iocbs (I/Os) on the txcmplq and let the
		 * SCSI layer retry them after re-establishing link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: whether to log the reset-needed message.
 *
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. It waits for the port
 * status register (ERR, RDY, RN) bits before proceeding with the function
 * reset. During this process, interrupt vectors are freed and later requested
 * for handling possible port resource change.
 **/
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;

	/*
	 * On error status condition, the driver needs to wait for the port
	 * to become ready before performing reset.
	 */
	rc = lpfc_sli4_pdev_status_reg_wait(phba);
	if (!rc) {
		/* need reset: attempt for port recovery */
		if (en_rn_msg)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2887 Reset Needed: Attempting Port "
					"Recovery...\n");
		lpfc_offline_prep(phba, mbx_action);
		lpfc_offline(phba);
		/* release interrupt for possible resource change */
		lpfc_sli4_disable_intr(phba);
		lpfc_sli_brdrestart(phba);
		/* request and enable interrupt */
		intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3175 Failed to enable interrupt\n");
			return -EIO;
		} else {
			phba->intr_mode = intr_mode;
		}
		rc = lpfc_online(phba);
		if (rc == 0)
			lpfc_unblock_mgmt_io(phba);
	}
	return rc;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t pci_rd_rc1, pci_rd_rc2;
	bool en_rn_msg = true;
	int rc;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			/* TODO: Register for Overtemp async events. */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2889 Port Overtemperature event, "
				"taking port offline\n");
			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			break;
		}
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3143 Port Down: Firmware Update "
					"Detected\n");
			en_rn_msg = false;
		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3145 Port Down: Provisioning\n");

		/* Check port status register for function reset */
		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
				en_rn_msg);
		if (rc == 0) {
			/* don't report event on forced debug dump */
			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
				return;
			else
				break;
		}
		/* fall through for not able to recover */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3152 Unrecoverable error, bring the port "
				"offline\n");
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer in the lpfc_hba struct.
 *
 * Return codes
 *   None.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event.
 **/
/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	/* rc identifies the failing step above: 1 = mailbox alloc,
	 * 2 = dmabuf alloc, 3 = mbuf alloc, 4 = mailbox issue.
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

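/*
 * VPD is parsed below per the standard PCI VPD resource format: tag 0x82
 * is the large-resource identifier string, 0x90 is the read-only VPD-R
 * data (and 0x91 the read/write VPD-W data), and 0x78 is the small-resource
 * end tag.  Within VPD-R, fields are keyword/length/data triples; the
 * keywords of interest here are SN (serial number) and the vendor-specific
 * V1-V4 entries.
 */
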
/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
				/* Look for Serial Number */
				if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->SerialNumber[j++] = vpd[index++];
						if (j == 31)
							break;
					}
					phba->SerialNumber[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
					phba->vpd_flag |= VPD_MODEL_DESC;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ModelDesc[j++] = vpd[index++];
						if (j == 255)
							break;
					}
					phba->ModelDesc[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
					phba->vpd_flag |= VPD_MODEL_NAME;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ModelName[j++] = vpd[index++];
						if (j == 79)
							break;
					}
					phba->ModelName[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
					phba->vpd_flag |= VPD_PROGRAM_TYPE;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ProgramType[j++] = vpd[index++];
						if (j == 255)
							break;
					}
					phba->ProgramType[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
					phba->vpd_flag |= VPD_PORT;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						if ((phba->sli_rev == LPFC_SLI_REV4) &&
						    (phba->sli4_hba.pport_name_sta ==
						     LPFC_SLI4_PPNAME_GET)) {
							j++;
							index++;
						} else
							phba->Port[j++] = vpd[index++];
						if (j == 19)
							break;
					}
					if ((phba->sli_rev != LPFC_SLI_REV4) ||
					    (phba->sli4_hba.pport_name_sta ==
					     LPFC_SLI4_PPNAME_NON))
						phba->Port[j] = 0;
					continue;
				} else {
					index += 2;
					i = vpd[index];
					index += 1;
					index += i;
					Length -= (3 + i);
				}
			}
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index++;
			break;
		}
	}

	return 1;
}

/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves the HBA's description based on its registered PCI
 * device ID. The @descp passed into this function points to an array of
 * 256 chars. It shall be returned with the model name, maximum speed, and
 * the host bus type. The @mdp passed into this function points to an array
 * of 80 chars. When the function returns, the @mdp will be filled with the
 * model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_16Gb)
		max_speed = 16;
	else if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else if (phba->lmt & LMT_1Gb)
		max_speed = 1;
	else
		max_speed = 0;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI", ""};
		else
			m = (typeof(m)){"LP7000E", "PCI", ""};
		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI", ""};
		else
			m = (typeof(m)){"LP9000", "PCI", ""};
		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HORNET:
		m = (typeof(m)){"LP21000", "PCIe",
				"Obsolete, Unsupported FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_TOMCAT:
		oneConnect = 1;
		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_FALCON:
		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
				"EmulexSecure Fibre"};
		break;
	case PCI_DEVICE_ID_BALIUS:
		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC:
		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC_VF:
		m = (typeof(m)){"LPe16000", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe",
				"Obsolete, Unsupported FCoE"};
		break;
	case PCI_DEVICE_ID_SKYHAWK:
	case PCI_DEVICE_ID_SKYHAWK_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
		break;
	default:
		m = (typeof(m)){"Unknown", "", ""};
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79, "%s", m.name);
	/*
	 * OneConnect HBAs require special processing; they are all initiators
	 * and we put the port number on the end.
	 */
	if (descp && descp[0] == '\0') {
		if (oneConnect)
			snprintf(descp, 255,
				 "Emulex OneConnect %s, %s Initiator %s",
				 m.name, m.function,
				 phba->Port);
		else if (max_speed == 0)
			snprintf(descp, 255,
				 "Emulex %s %s %s ",
				 m.name, m.bus, m.function);
		else
			snprintf(descp, 255,
				 "Emulex %s %d%s %s %s",
				 m.name, max_speed, (GE) ? "GE" : "Gb",
				 m.bus, m.function);
	}
}

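/*
 * ELS/CT receive buffers are replenished below by chaining up to two DMA
 * buffers per QUE_RING_BUF64_CN IOCB.  Any shortfall (allocation or issue
 * failure) is remembered in pring->missbufcnt and retried on the next call.
 */
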
/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to an IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring.
 *
 * Return codes
 *   The number of IOCBs NOT able to be posted to the IOCB ring.
 **/
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

	cnt += pring->missbufcnt;

	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for command iocb */
		iocb = lpfc_sli_get_iocbq(phba);
		if (iocb == NULL) {
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
		if (!mp1 || !mp1->virt) {
			kfree(mp1);
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		/* Allocate buffer to post */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (!mp2 || !mp2->virt) {
				kfree(mp2);
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				lpfc_sli_release_iocbq(phba, iocb);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else {
			mp2 = NULL;
		}

		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
		    IOCB_ERROR) {
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2)
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
	}
	pring->missbufcnt = 0;
	return 0;
}

/**
 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts initial receive IOCB buffers to the ELS ring. The
 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
 * set to 64 IOCBs.
 *
 * Return codes
 *   0 - success (currently always success)
 **/
static int
lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Ring 0, ELS / CT buffers */
	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
	/* Ring 2 - FCP no buffers needed */

	return 0;
}

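/*
 * The helpers below implement the SHA-1 hash from FIPS 180-1: S(N,V) is a
 * 32-bit rotate-left, lpfc_sha_init() loads the standard initial hash
 * values, and lpfc_sha_iterate() runs the 80-round compression function
 * (note the 0x5A827999/0x6ED9EBA1/0x8F1BBCDC/0xCA62C1D6 round constants).
 * They are used to hash the LC HBA challenge data in lpfc_hba_init().
 */
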
#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))

/**
 * lpfc_sha_init - Set up initial array of hash table entries
 * @HashResultPointer: pointer to an array as hash table.
 *
 * This routine sets up the initial values to the array of hash table entries
 * for the LC HBAs.
 **/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
	HashResultPointer[0] = 0x67452301;
	HashResultPointer[1] = 0xEFCDAB89;
	HashResultPointer[2] = 0x98BADCFE;
	HashResultPointer[3] = 0x10325476;
	HashResultPointer[4] = 0xC3D2E1F0;
}

/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to a working hash table.
 *
 * This routine iterates an initial hash table pointed by @HashResultPointer
 * with the values from the working hash table pointed by @HashWorkingPointer.
 * The results are put back into the initial hash table, returned through
 * the @HashResultPointer as the result hash table.
 **/
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	int t;
	uint32_t TEMP;
	uint32_t A, B, C, D, E;
	t = 16;
	do {
		HashWorkingPointer[t] =
		    S(1,
		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
	} while (++t <= 79);
	t = 0;
	A = HashResultPointer[0];
	B = HashResultPointer[1];
	C = HashResultPointer[2];
	D = HashResultPointer[3];
	E = HashResultPointer[4];

	do {
		if (t < 20) {
			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
		} else if (t < 40) {
			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
		} else if (t < 60) {
			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
		} else {
			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
		}
		TEMP += S(5, A) + E + HashWorkingPointer[t];
		E = D;
		D = C;
		C = S(30, B);
		B = A;
		A = TEMP;
	} while (++t <= 79);

	HashResultPointer[0] += A;
	HashResultPointer[1] += B;
	HashResultPointer[2] += C;
	HashResultPointer[3] += D;
	HashResultPointer[4] += E;

}

/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * This routine calculates the working hash array referred by @HashWorking
 * from the challenge random numbers associated with the host, referred by
 * @RandomChallenge. The result is put into the entry of the working hash
 * array and returned by reference through @HashWorking.
 **/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	*HashWorking = (*RandomChallenge ^ *HashWorking);
}

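/*
 * lpfc_hba_init() below ties the pieces together: an 80-word working array
 * is seeded with the WWNN words and XORed with the adapter's random
 * challenge, then run through the SHA-1 rounds above; the resulting
 * five-word digest is returned in @hbainit.
 */
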
/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to an array of unsigned 32-bit integers.
 *
 * This routine performs the special handling for LC HBA initialization.
 **/
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
	int t;
	uint32_t *HashWorking;
	uint32_t *pwwnn = (uint32_t *) phba->wwnn;

	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
	if (!HashWorking)
		return;

	HashWorking[0] = HashWorking[78] = *pwwnn++;
	HashWorking[1] = HashWorking[79] = *pwwnn;

	for (t = 0; t < 7; t++)
		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);

	lpfc_sha_init(hbainit);
	lpfc_sha_iterate(hbainit, HashWorking);
	kfree(HashWorking);
}

/**
 * lpfc_cleanup - Performs vport cleanups before deleting a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine performs the necessary cleanups before deleting the @vport.
 * It invokes the discovery state machine to perform necessary state
 * transitions and to release the ndlps associated with the @vport. Note,
 * the physical port is treated as @vport 0.
 **/
void
lpfc_cleanup(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int i = 0;

	if (phba->link_state > LPFC_LINK_DOWN)
		lpfc_port_link_failure(vport);

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp)) {
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_UNUSED_NODE);
			if (!ndlp)
				continue;
			spin_lock_irq(&phba->ndlp_lock);
			NLP_SET_FREE_REQ(ndlp);
			spin_unlock_irq(&phba->ndlp_lock);
			/* Trigger the release of the ndlp memory */
			lpfc_nlp_put(ndlp);
			continue;
		}
		spin_lock_irq(&phba->ndlp_lock);
		if (NLP_CHK_FREE_REQ(ndlp)) {
			/* The ndlp should not be in memory free mode already */
			spin_unlock_irq(&phba->ndlp_lock);
			continue;
		} else
			/* Indicate request for freeing ndlp memory */
			NLP_SET_FREE_REQ(ndlp);
		spin_unlock_irq(&phba->ndlp_lock);

		if (vport->port_type != LPFC_PHYSICAL_PORT &&
		    ndlp->nlp_DID == Fabric_DID) {
			/* Just free up ndlp with Fabric_DID for vports */
			lpfc_nlp_put(ndlp);
			continue;
		}

		/* take care of nodes in unused state before the state
		 * machine taking action.
		 */
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
			lpfc_nlp_put(ndlp);
			continue;
		}

		if (ndlp->nlp_type & NLP_FABRIC)
			lpfc_disc_state_machine(vport, ndlp, NULL,
						NLP_EVT_DEVICE_RECOVERY);

		lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RM);
	}

	/* At this point, ALL ndlp's should be gone
	 * because of the previous NLP_EVT_DEVICE_RM.
	 * Let's wait for this to happen, if needed.
	 */
	while (!list_empty(&vport->fc_nodes)) {
		if (i++ > 3000) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0233 Nodelist not empty\n");
			list_for_each_entry_safe(ndlp, next_ndlp,
						 &vport->fc_nodes, nlp_listp) {
				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
						 LOG_NODE,
						 "0282 did:x%x ndlp:x%p "
						 "usgmap:x%x refcnt:%d\n",
						 ndlp->nlp_DID, (void *)ndlp,
						 ndlp->nlp_usg_map,
						 atomic_read(
							&ndlp->kref.refcount));
			}
			break;
		}

		/* Wait for any activity on ndlps to settle */
		msleep(10);
	}
	lpfc_cleanup_vports_rrqs(vport, NULL);
}

/**
 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine stops all the timers associated with a @vport. This function
 * is invoked before disabling or deleting a @vport. Note that the physical
 * port is treated as @vport 0.
 **/
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
	del_timer_sync(&vport->els_tmofunc);
	del_timer_sync(&vport->fc_fdmitmo);
	del_timer_sync(&vport->delayed_disc_tmo);
	lpfc_can_disctmo(vport);
	return;
}

/**
 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
 * caller of this routine should already hold the host lock.
 **/
void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	/* Clear pending FCF rediscovery wait flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

	/* Now, try to stop the timer */
	del_timer(&phba->fcf.redisc_wait);
}

/**
 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
 * checks whether the FCF rediscovery wait timer is pending with the host
 * lock held before proceeding with disabling the timer and clearing the
 * wait timer pending flag.
 **/
void
lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		/* FCF rediscovery timer already fired or stopped */
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
	/* Clear failover in progress flags */
	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops all the timers associated with a HBA. This function is
 * invoked before either putting a HBA offline or unloading the driver.
 **/
void
lpfc_stop_hba_timers(struct lpfc_hba *phba)
{
	lpfc_stop_vport_timers(phba->pport);
	del_timer_sync(&phba->sli.mbox_tmo);
	del_timer_sync(&phba->fabric_block_timer);
	del_timer_sync(&phba->eratt_poll);
	del_timer_sync(&phba->hb_tmofunc);
	if (phba->sli_rev == LPFC_SLI_REV4) {
		del_timer_sync(&phba->rrq_tmr);
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	}
	phba->hb_outstanding = 0;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		/* Stop any LightPulse device specific driver timers */
		del_timer_sync(&phba->fcp_poll_timer);
		break;
	case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0297 Invalid device group (x%x)\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

/**
 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for whether to wait for the active mailbox command.
 *
 * This routine marks a HBA's management interface as blocked. Once the HBA's
 * management interface is marked as blocked, all the user space access to
 * the HBA, whether they are from sysfs interface or libdfc interface, will
 * be blocked. The HBA is set to block the management interface when the
 * driver prepares the HBA interface for online or offline.
 **/
static void
lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
{
	unsigned long iflag;
	uint8_t actcmd = MBX_HEARTBEAT;
	unsigned long timeout;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (mbx_action == LPFC_MBX_NO_WAIT)
		return;
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (phba->sli.mbox_active) {
		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
				phba->sli.mbox_active) * 1000) + jiffies;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2813 Mgmt IO is Blocked %x "
				"- mbox cmd %x still active\n",
				phba->sli.sli_flag, actcmd);
			break;
		}
	}
}

/**
 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocate RPIs for all active remote nodes. This is needed whenever
 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
 * is to fix up the temporary rpi assignments.
 **/
void
lpfc_sli4_node_prep(struct lpfc_hba *phba)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->load_flag & FC_UNLOADING)
				continue;

			list_for_each_entry_safe(ndlp, next_ndlp,
						 &vports[i]->fc_nodes,
						 nlp_listp) {
				if (NLP_CHK_NODE_ACT(ndlp))
					ndlp->nlp_rpi =
						lpfc_sli4_alloc_rpi(phba);
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_online - Initialize and bring a HBA online
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the HBA and brings a HBA online. During this
 * process, the management interface is blocked to prevent user space access
 * to the HBA interfering with the driver initialization.
 *
 * Return codes
 *   0 - successful
 *   1 - failed
 **/
int
lpfc_online(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i;
	bool vpis_cleared = false;

	if (!phba)
		return 0;
	vport = phba->pport;

	if (!(vport->fc_flag & FC_OFFLINE_MODE))
		return 0;

	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0458 Bring Adapter online\n");

	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	if (!lpfc_sli_queue_setup(phba)) {
		lpfc_unblock_mgmt_io(phba);
		return 1;
	}

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
		spin_lock_irq(&phba->hbalock);
		if (!phba->sli4_hba.max_cfg_param.vpi_used)
			vpis_cleared = true;
		spin_unlock_irq(&phba->hbalock);
	} else {
		if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			struct Scsi_Host *shost;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			if (phba->sli_rev == LPFC_SLI_REV4) {
				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
				if ((vpis_cleared) &&
				    (vports[i]->port_type !=
					LPFC_PHYSICAL_PORT))
					vports[i]->vpi = 0;
			}
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	lpfc_unblock_mgmt_io(phba);
	return 0;
}

/**
 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine marks a HBA's management interface as not blocked. Once the
 * HBA's management interface is marked as not blocked, all the user space
 * access to the HBA, whether they are from sysfs interface or libdfc
 * interface will be allowed. The HBA is set to block the management interface
 * when the driver prepares the HBA interface for online or offline and then
 * set to unblock the management interface afterwards.
 **/
void
lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
{
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_offline_prep - Prepare a HBA to be brought offline
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for how the mailbox subsystem shutdown is handled.
 *
 * This routine is invoked to prepare a HBA to be brought offline. It
 * unregisters the login for all nodes on all vports and flushes the mailbox
 * queue to make it ready to be brought offline.
 **/
void
lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	int i;

	if (vport->fc_flag & FC_OFFLINE_MODE)
		return;

	lpfc_block_mgmt_io(phba, mbx_action);

	lpfc_linkdown(phba);

	/* Issue an unreg_login to all nodes on all vports */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->load_flag & FC_UNLOADING)
				continue;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
			spin_unlock_irq(shost->host_lock);

			shost = lpfc_shost_from_vport(vports[i]);
			list_for_each_entry_safe(ndlp, next_ndlp,
						 &vports[i]->fc_nodes,
						 nlp_listp) {
				if (!NLP_CHK_NODE_ACT(ndlp))
					continue;
				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
					continue;
				if (ndlp->nlp_type & NLP_FABRIC) {
					lpfc_disc_state_machine(vports[i], ndlp,
						NULL, NLP_EVT_DEVICE_RECOVERY);
					lpfc_disc_state_machine(vports[i], ndlp,
						NULL, NLP_EVT_DEVICE_RM);
				}
				spin_lock_irq(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
				spin_unlock_irq(shost->host_lock);
				/*
				 * Whenever an SLI4 port goes offline, free the
				 * RPI. Get a new RPI when the adapter port
				 * comes back online.
				 */
				if (phba->sli_rev == LPFC_SLI_REV4)
					lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
				lpfc_unreg_rpi(vports[i], ndlp);
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
}

/**
 * lpfc_offline - Bring a HBA offline
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine actually brings a HBA offline. It stops all the timers
 * associated with the HBA, brings down the SLI layer, and eventually
 * marks the HBA as in offline state for the upper layer protocol.
 **/
void
lpfc_offline(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int i;

	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
		return;

	/* stop port and all timers associated with this hba */
	lpfc_stop_port(phba);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_stop_vport_timers(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0460 Bring Adapter offline\n");
	/* Bring down the SLI Layer and cleanup.  The HBA is offline now. */
	lpfc_sli_hba_down(phba);
	spin_lock_irq(&phba->hbalock);
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->work_port_events = 0;
			vports[i]->fc_flag |= FC_OFFLINE_MODE;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to free all the SCSI buffers and IOCBs from the driver
 * lists back to the kernel. It is called from lpfc_pci_remove_one to free
 * the internal resources before the device is removed from the system.
 **/
static void
lpfc_scsi_free(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *sb, *sb_next;
	struct lpfc_iocbq *io, *io_next;

	spin_lock_irq(&phba->hbalock);

	/* Release all the lpfc_scsi_bufs maintained by this host. */

	spin_lock(&phba->scsi_buf_list_put_lock);
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
				 list) {
		list_del(&sb->list);
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}
	spin_unlock(&phba->scsi_buf_list_put_lock);

	spin_lock(&phba->scsi_buf_list_get_lock);
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
				 list) {
		list_del(&sb->list);
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}
	spin_unlock(&phba->scsi_buf_list_get_lock);

	/* Release all the lpfc_iocbq entries maintained by this host. */
	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
		list_del(&io->list);
		kfree(io);
		phba->total_iocbq_bufs--;
	}

	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the sizes of the current els and allocated
 * scsi sgl lists, and then goes through all sgls to update the physical
 * XRIs assigned due to a port function reset. During port initialization,
 * the sizes of the current els and allocated scsi sgl lists are 0s.
 *
 * Return codes
 *   0 - successful (for now, it always returns 0)
 **/
int
lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
	struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL;
	uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt;
	LIST_HEAD(els_sgl_list);
	LIST_HEAD(scsi_sgl_list);
	int rc;

	/*
	 * update on pci function's els xri-sgl list
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
	if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl expanded */
		xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3157 ELS xri-sgl count increased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		/* allocate the additional els sgls */
		for (i = 0; i < xri_cnt; i++) {
			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
					     GFP_KERNEL);
			if (sglq_entry == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"2562 Failure to allocate an "
						"ELS sgl entry:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->buff_type = GEN_BUFF_TYPE;
			sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
							   &sglq_entry->phys);
			if (sglq_entry->virt == NULL) {
				kfree(sglq_entry);
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"2563 Failure to allocate an "
						"ELS mbuf:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->sgl = sglq_entry->virt;
			memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
			sglq_entry->state = SGL_FREED;
			list_add_tail(&sglq_entry->list, &els_sgl_list);
		}
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
		spin_unlock_irq(&phba->hbalock);
	} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl shrunk */
		xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3158 ELS xri-sgl count decreased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list);
		spin_unlock_irq(&phba->hbalock);
		/* release extra els sgls from list */
		for (i = 0; i < xri_cnt; i++) {
			list_remove_head(&els_sgl_list,
					 sglq_entry, struct lpfc_sglq, list);
			if (sglq_entry) {
				lpfc_mbuf_free(phba, sglq_entry->virt,
					       sglq_entry->phys);
				kfree(sglq_entry);
			}
		}
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
		spin_unlock_irq(&phba->hbalock);
	} else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3163 ELS xri-sgl count unchanged: %d\n",
				els_xri_cnt);
	phba->sli4_hba.els_xri_cnt = els_xri_cnt;

	/* update xris to els sgls on the list */
	sglq_entry = NULL;
	sglq_entry_next = NULL;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &phba->sli4_hba.lpfc_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2400 Failed to allocate xri for "
					"ELS sgl\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		sglq_entry->sli4_lxritag = lxri;
		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}

	/*
	 * update on pci function's allocated scsi xri-sgl list
	 */
	phba->total_scsi_bufs = 0;

	/* maximum number of xris available for scsi buffers */
	phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
				      els_xri_cnt;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2401 Current allocated SCSI xri-sgl count:%d, "
			"maximum SCSI xri count:%d\n",
			phba->sli4_hba.scsi_xri_cnt,
			phba->sli4_hba.scsi_xri_max);

	spin_lock_irq(&phba->scsi_buf_list_get_lock);
	spin_lock(&phba->scsi_buf_list_put_lock);
	list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
	list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
	spin_unlock(&phba->scsi_buf_list_put_lock);
	spin_unlock_irq(&phba->scsi_buf_list_get_lock);

	if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
		/* max scsi xri shrunk below the allocated scsi buffers */
		scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
					phba->sli4_hba.scsi_xri_max;
		/* release the extra allocated scsi buffers */
		for (i = 0; i < scsi_xri_cnt; i++) {
			list_remove_head(&scsi_sgl_list, psb,
					 struct lpfc_scsi_buf, list);
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool, psb->data,
				      psb->dma_handle);
			kfree(psb);
		}
		spin_lock_irq(&phba->scsi_buf_list_get_lock);
		phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
		spin_unlock_irq(&phba->scsi_buf_list_get_lock);
	}

	/* update xris associated with the remaining allocated scsi buffers */
	psb = NULL;
	psb_next = NULL;
	list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2560 Failed to allocate xri for "
					"scsi buffer\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		psb->cur_iocbq.sli4_lxritag = lxri;
		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	spin_lock_irq(&phba->scsi_buf_list_get_lock);
	spin_lock(&phba->scsi_buf_list_put_lock);
	list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
	spin_unlock(&phba->scsi_buf_list_put_lock);
	spin_unlock_irq(&phba->scsi_buf_list_get_lock);

	return 0;

out_free_mem:
	lpfc_free_els_sgl_list(phba);
	lpfc_scsi_free(phba);
	return rc;
}

/**
 * lpfc_create_port - Create an FC port
 * @phba: pointer to lpfc hba data structure.
 * @instance: a unique integer ID to this FC port.
 * @dev: pointer to the device data structure.
 *
 * This routine creates an FC port for the upper layer protocol. The FC port
 * can be created on top of either a physical port or a virtual port provided
 * by the HBA. This routine also allocates a SCSI host data structure (shost)
 * and associates the FC port created before adding the shost into the SCSI
 * layer.
 *
 * Return codes
 *   @vport - pointer to the virtual N_Port data structure.
 *   NULL - port create failed.
 **/
struct lpfc_vport *
lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
	struct lpfc_vport *vport;
	struct Scsi_Host *shost;
	int error = 0;

	if (dev != &phba->pcidev->dev)
		shost = scsi_host_alloc(&lpfc_vport_template,
					sizeof(struct lpfc_vport));
	else
		shost = scsi_host_alloc(&lpfc_template,
					sizeof(struct lpfc_vport));
	if (!shost)
		goto out;

	vport = (struct lpfc_vport *) shost->hostdata;
	vport->phba = phba;
	vport->load_flag |= FC_LOADING;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	vport->fc_rscn_flush = 0;

	lpfc_get_vport_cfgparam(vport);
	shost->unique_id = instance;
	shost->max_id = LPFC_MAX_TARGET;
	shost->max_lun = vport->cfg_max_luns;
	shost->this_id = -1;
	shost->max_cmd_len = 16;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		shost->dma_boundary =
			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
	}

	/*
	 * Set initial can_queue value since 0 is no longer supported and
	 * scsi_add_host will fail. This will be adjusted later based on the
	 * max xri value determined in hba setup.
	 */
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (dev != &phba->pcidev->dev) {
		shost->transportt = lpfc_vport_transport_template;
		vport->port_type = LPFC_NPIV_PORT;
	} else {
		shost->transportt = lpfc_transport_template;
		vport->port_type = LPFC_PHYSICAL_PORT;
	}

	/* Initialize all internally managed lists. */
	INIT_LIST_HEAD(&vport->fc_nodes);
	INIT_LIST_HEAD(&vport->rcv_buffer_list);
	spin_lock_init(&vport->work_port_lock);

	init_timer(&vport->fc_disctmo);
	vport->fc_disctmo.function = lpfc_disc_timeout;
	vport->fc_disctmo.data = (unsigned long)vport;

	init_timer(&vport->fc_fdmitmo);
	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
	vport->fc_fdmitmo.data = (unsigned long)vport;

	init_timer(&vport->els_tmofunc);
	vport->els_tmofunc.function = lpfc_els_timeout;
	vport->els_tmofunc.data = (unsigned long)vport;

	init_timer(&vport->delayed_disc_tmo);
	vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
	vport->delayed_disc_tmo.data = (unsigned long)vport;

	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
	if (error)
		goto out_put_shost;

	spin_lock_irq(&phba->hbalock);
	list_add_tail(&vport->listentry, &phba->port_list);
	spin_unlock_irq(&phba->hbalock);
	return vport;

out_put_shost:
	scsi_host_put(shost);
out:
	return NULL;
}

/**
 * destroy_port - destroy an FC port
 * @vport: pointer to an lpfc virtual N_Port data structure.
 *
 * This routine destroys an FC port from the upper layer protocol. All the
 * resources associated with the port are released.
 **/
void
destroy_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;

	lpfc_debugfs_terminate(vport);
	fc_remove_host(shost);
	scsi_remove_host(shost);

	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	lpfc_cleanup(vport);
	return;
}

/**
 * lpfc_get_instance - Get a unique integer ID
 *
 * This routine allocates a unique integer ID from the lpfc_hba_index pool.
 * It uses the kernel idr facility to perform the task.
 *
 * Return codes:
 *   instance - a unique integer ID allocated as the new instance.
 *   -1 - lpfc get instance failed.
 **/
int
lpfc_get_instance(void)
{
	int ret;

	ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
	return ret < 0 ? -1 : ret;
}

/**
 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
 * @shost: pointer to SCSI host data structure.
 * @time: elapsed time of the scan in jiffies.
 *
 * This routine is called by the SCSI layer with a SCSI host to determine
 * whether the scan host is finished.
 *
 * Note: there is no scan_start function as adapter initialization will have
 * asynchronously kicked off the link initialization.
 *
 * Return codes
 *   0 - SCSI host scan is not over yet.
 *   1 - SCSI host scan is over.
 **/
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int stat = 0;

	spin_lock_irq(shost->host_lock);

	if (vport->load_flag & FC_UNLOADING) {
		stat = 1;
		goto finished;
	}
	if (time >= msecs_to_jiffies(30 * 1000)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0461 Scanning longer than 30 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}
	if (time >= msecs_to_jiffies(15 * 1000) &&
	    phba->link_state <= LPFC_LINK_DOWN) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0465 Link down longer than 15 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}

	if (vport->port_state != LPFC_VPORT_READY)
		goto finished;
	if (vport->num_disc_nodes || vport->fc_prli_sent)
		goto finished;
	if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
		goto finished;
	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
		goto finished;

	stat = 1;

finished:
	spin_unlock_irq(shost->host_lock);
	return stat;
}

/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
 * @shost: pointer to SCSI host data structure.
 *
 * This routine initializes a given SCSI host attributes on a FC port. The
 * SCSI host can be either on top of a physical port or a virtual port.
 **/
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	/*
	 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
	 */

	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_supported_classes(shost) = FC_COS_CLASS3;

	memset(fc_host_supported_fc4s(shost), 0,
	       sizeof(fc_host_supported_fc4s(shost)));
	fc_host_supported_fc4s(shost)[2] = 1;
	fc_host_supported_fc4s(shost)[7] = 1;

	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
				      sizeof fc_host_symbolic_name(shost));

	fc_host_supported_speeds(shost) = 0;
	if (phba->lmt & LMT_16Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
	if (phba->lmt & LMT_10Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
	if (phba->lmt & LMT_8Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
	if (phba->lmt & LMT_4Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
	if (phba->lmt & LMT_2Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
	if (phba->lmt & LMT_1Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;

	fc_host_maxframe_size(shost) =
		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;

	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(shost), 0,
	       sizeof(fc_host_active_fc4s(shost)));
	fc_host_active_fc4s(shost)[2] = 1;
	fc_host_active_fc4s(shost)[7] = 1;

	fc_host_max_npiv_vports(shost) = phba->max_vpi;
	spin_lock_irq(shost->host_lock);
	vport->load_flag &= ~FC_LOADING;
	spin_unlock_irq(shost->host_lock);
}

/**
 * lpfc_stop_port_s3 - Stop SLI3 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI3 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s3(struct lpfc_hba *phba)
{
	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	/* Clear all pending interrupts */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */

	/* Reset some HBA SLI setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
}

/**
 * lpfc_stop_port_s4 - Stop SLI4 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI4 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s4(struct lpfc_hba *phba)
{
	/* Reset some HBA SLI4 setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
	phba->sli4_hba.intr_enable = 0;
}

/**
 * lpfc_stop_port - Wrapper function for stopping hba port
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
 * the API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_stop_port(struct lpfc_hba *phba)
{
	phba->lpfc_stop_port(phba);
}

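/*
 * FCF rediscovery wait-timer protocol: lpfc_fcf_redisc_wait_start_timer()
 * arms the timer and sets FCF_REDISC_PEND; the stop routines clear the flag
 * under hbalock before deleting the timer.  When the timer fires in
 * lpfc_sli4_fcf_redisc_wait_tmo() (timer context), it only converts
 * FCF_REDISC_PEND into FCF_REDISC_EVT and wakes the worker thread, which
 * then performs the actual rediscovery in process context.
 */
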
/**
 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
 * @phba: Pointer to hba for which this call is being executed.
 *
 * This routine starts the timer waiting for the FCF rediscovery to complete.
 **/
void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
	unsigned long fcf_redisc_wait_tmo =
		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
	/* Start fcf rediscovery wait period timer */
	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
	spin_lock_irq(&phba->hbalock);
	/* Allow action to new fcf asynchronous event */
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	/* Mark the FCF rediscovery pending state */
	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @ptr: unsigned long holding the pointer to the lpfc hba data structure.
 *
 * This routine is invoked when the wait for FCF table rediscovery has
 * timed out. If new FCF record(s) has (have) been discovered during the
 * wait period, a new FCF event shall be added to the FCOE async event
 * list, and then the worker thread shall be woken up for processing from
 * the worker thread context.
 **/
void
lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;

	/* Don't send FCF rediscovery event if timer cancelled */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* Clear FCF rediscovery timer pending flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
	/* FCF rediscovery event to worker thread */
	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2776 FCF rediscover quiescent timer expired\n");
	/* wake up worker thread */
	lpfc_worker_wake_up(phba);
}

/**
 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link-attention link fault code and
 * translate it into the base driver's read link attention mailbox command
 * status.
 *
 * Return: Link-attention status in terms of base driver's coding.
 **/
static uint16_t
lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
			   struct lpfc_acqe_link *acqe_link)
{
	uint16_t latt_fault;

	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
	case LPFC_ASYNC_LINK_FAULT_NONE:
	case LPFC_ASYNC_LINK_FAULT_LOCAL:
	case LPFC_ASYNC_LINK_FAULT_REMOTE:
		latt_fault = 0;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0398 Invalid link fault code: x%x\n",
				bf_get(lpfc_acqe_link_fault, acqe_link));
		latt_fault = MBXERR_ERROR;
		break;
	}
	return latt_fault;
}

/**
 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link attention type and translate it
 * into the base driver's link attention type coding.
 *
 * Return: Link attention type in terms of base driver's coding.
3617  **/
3618 static uint8_t
3619 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3620 			  struct lpfc_acqe_link *acqe_link)
3621 {
3622 	uint8_t att_type;
3623
3624 	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3625 	case LPFC_ASYNC_LINK_STATUS_DOWN:
3626 	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
3627 		att_type = LPFC_ATT_LINK_DOWN;
3628 		break;
3629 	case LPFC_ASYNC_LINK_STATUS_UP:
3630 		/* Ignore physical link up events - wait for logical link up */
3631 		att_type = LPFC_ATT_RESERVED;
3632 		break;
3633 	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
3634 		att_type = LPFC_ATT_LINK_UP;
3635 		break;
3636 	default:
3637 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3638 				"0399 Invalid link attention type: x%x\n",
3639 				bf_get(lpfc_acqe_link_status, acqe_link));
3640 		att_type = LPFC_ATT_RESERVED;
3641 		break;
3642 	}
3643 	return att_type;
3644 }
3645
3646 /**
3647  * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
3648  * @phba: pointer to lpfc hba data structure.
3649  * @acqe_link: pointer to the async link completion queue entry.
3650  *
3651  * This routine is to parse the SLI4 link-attention link speed and translate
3652  * it into the base driver's link-attention link speed coding.
3653  *
3654  * Return: Link-attention link speed in terms of base driver's coding.
3655  **/
3656 static uint8_t
3657 lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3658 				struct lpfc_acqe_link *acqe_link)
3659 {
3660 	uint8_t link_speed;
3661
3662 	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
3663 	case LPFC_ASYNC_LINK_SPEED_ZERO:
3664 	case LPFC_ASYNC_LINK_SPEED_10MBPS:
3665 	case LPFC_ASYNC_LINK_SPEED_100MBPS:
3666 		link_speed = LPFC_LINK_SPEED_UNKNOWN;
3667 		break;
3668 	case LPFC_ASYNC_LINK_SPEED_1GBPS:
3669 		link_speed = LPFC_LINK_SPEED_1GHZ;
3670 		break;
3671 	case LPFC_ASYNC_LINK_SPEED_10GBPS:
3672 		link_speed = LPFC_LINK_SPEED_10GHZ;
3673 		break;
3674 	default:
3675 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3676 				"0483 Invalid link-attention link speed: x%x\n",
3677 				bf_get(lpfc_acqe_link_speed, acqe_link));
3678 		link_speed = LPFC_LINK_SPEED_UNKNOWN;
3679 		break;
3680 	}
3681 	return link_speed;
3682 }
3683
3684 /**
3685  * lpfc_sli_port_speed_get - Translate sli3 link speed code to link speed
3686  * @phba: pointer to lpfc hba data structure.
3687  *
3688  * This routine is to get an SLI3 FC port's link speed in Mbps.
3689  *
3690  * Return: link speed in terms of Mbps.
3691  **/
3692 uint32_t
3693 lpfc_sli_port_speed_get(struct lpfc_hba *phba)
3694 {
3695 	uint32_t link_speed;
3696
3697 	if (!lpfc_is_link_up(phba))
3698 		return 0;
3699
3700 	switch (phba->fc_linkspeed) {
3701 	case LPFC_LINK_SPEED_1GHZ:
3702 		link_speed = 1000;
3703 		break;
3704 	case LPFC_LINK_SPEED_2GHZ:
3705 		link_speed = 2000;
3706 		break;
3707 	case LPFC_LINK_SPEED_4GHZ:
3708 		link_speed = 4000;
3709 		break;
3710 	case LPFC_LINK_SPEED_8GHZ:
3711 		link_speed = 8000;
3712 		break;
3713 	case LPFC_LINK_SPEED_10GHZ:
3714 		link_speed = 10000;
3715 		break;
3716 	case LPFC_LINK_SPEED_16GHZ:
3717 		link_speed = 16000;
3718 		break;
3719 	default:
3720 		link_speed = 0;
3721 	}
3722 	return link_speed;
3723 }
3724
3725 /**
3726  * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
3727  * @phba: pointer to lpfc hba data structure.
3728  * @evt_code: asynchronous event code.
3729  * @speed_code: asynchronous event link speed code.
3730  *
3731  * This routine is to parse the given SLI4 async event link speed code into
3732  * a link speed value in Mbps.
3733  *
3734  * Return: link speed in terms of Mbps.
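 *
 * For example, per the switch below, the pair (LPFC_TRAILER_CODE_FC,
 * LPFC_EVT_CODE_FC_8_GBAUD) parses to 8000 Mbps, while any speed code
 * that is not recognized for the given event code parses to 0.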
3735 **/ 3736 static uint32_t 3737 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, 3738 uint8_t speed_code) 3739 { 3740 uint32_t port_speed; 3741 3742 switch (evt_code) { 3743 case LPFC_TRAILER_CODE_LINK: 3744 switch (speed_code) { 3745 case LPFC_EVT_CODE_LINK_NO_LINK: 3746 port_speed = 0; 3747 break; 3748 case LPFC_EVT_CODE_LINK_10_MBIT: 3749 port_speed = 10; 3750 break; 3751 case LPFC_EVT_CODE_LINK_100_MBIT: 3752 port_speed = 100; 3753 break; 3754 case LPFC_EVT_CODE_LINK_1_GBIT: 3755 port_speed = 1000; 3756 break; 3757 case LPFC_EVT_CODE_LINK_10_GBIT: 3758 port_speed = 10000; 3759 break; 3760 default: 3761 port_speed = 0; 3762 } 3763 break; 3764 case LPFC_TRAILER_CODE_FC: 3765 switch (speed_code) { 3766 case LPFC_EVT_CODE_FC_NO_LINK: 3767 port_speed = 0; 3768 break; 3769 case LPFC_EVT_CODE_FC_1_GBAUD: 3770 port_speed = 1000; 3771 break; 3772 case LPFC_EVT_CODE_FC_2_GBAUD: 3773 port_speed = 2000; 3774 break; 3775 case LPFC_EVT_CODE_FC_4_GBAUD: 3776 port_speed = 4000; 3777 break; 3778 case LPFC_EVT_CODE_FC_8_GBAUD: 3779 port_speed = 8000; 3780 break; 3781 case LPFC_EVT_CODE_FC_10_GBAUD: 3782 port_speed = 10000; 3783 break; 3784 case LPFC_EVT_CODE_FC_16_GBAUD: 3785 port_speed = 16000; 3786 break; 3787 default: 3788 port_speed = 0; 3789 } 3790 break; 3791 default: 3792 port_speed = 0; 3793 } 3794 return port_speed; 3795 } 3796 3797 /** 3798 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 3799 * @phba: pointer to lpfc hba data structure. 3800 * @acqe_link: pointer to the async link completion queue entry. 3801 * 3802 * This routine is to handle the SLI4 asynchronous FCoE link event. 3803 **/ 3804 static void 3805 lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 3806 struct lpfc_acqe_link *acqe_link) 3807 { 3808 struct lpfc_dmabuf *mp; 3809 LPFC_MBOXQ_t *pmb; 3810 MAILBOX_t *mb; 3811 struct lpfc_mbx_read_top *la; 3812 uint8_t att_type; 3813 int rc; 3814 3815 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 3816 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 3817 return; 3818 phba->fcoe_eventtag = acqe_link->event_tag; 3819 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3820 if (!pmb) { 3821 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3822 "0395 The mboxq allocation failed\n"); 3823 return; 3824 } 3825 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 3826 if (!mp) { 3827 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3828 "0396 The lpfc_dmabuf allocation failed\n"); 3829 goto out_free_pmb; 3830 } 3831 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 3832 if (!mp->virt) { 3833 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3834 "0397 The mbuf allocation failed\n"); 3835 goto out_free_dmabuf; 3836 } 3837 3838 /* Cleanup any outstanding ELS commands */ 3839 lpfc_els_flush_all_cmd(phba); 3840 3841 /* Block ELS IOCBs until we have done process link event */ 3842 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 3843 3844 /* Update link event statistics */ 3845 phba->sli.slistat.link_event++; 3846 3847 /* Create lpfc_handle_latt mailbox command from link ACQE */ 3848 lpfc_read_topology(phba, pmb, mp); 3849 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 3850 pmb->vport = phba->pport; 3851 3852 /* Keep the link status for extra SLI4 state machine reference */ 3853 phba->sli4_hba.link_state.speed = 3854 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, 3855 bf_get(lpfc_acqe_link_speed, acqe_link)); 3856 phba->sli4_hba.link_state.duplex = 3857 bf_get(lpfc_acqe_link_duplex, acqe_link); 3858 
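	/*
	 * The remaining link_state fields below are copied straight out of
	 * the ACQE; the port reports the logical link speed in units of
	 * 10 Mbps, so it is scaled to Mbps here.
	 */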
phba->sli4_hba.link_state.status =
3859 		bf_get(lpfc_acqe_link_status, acqe_link);
3860 	phba->sli4_hba.link_state.type =
3861 		bf_get(lpfc_acqe_link_type, acqe_link);
3862 	phba->sli4_hba.link_state.number =
3863 		bf_get(lpfc_acqe_link_number, acqe_link);
3864 	phba->sli4_hba.link_state.fault =
3865 		bf_get(lpfc_acqe_link_fault, acqe_link);
3866 	phba->sli4_hba.link_state.logical_speed =
3867 		bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
3868
3869 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3870 			"2900 Async FC/FCoE Link event - Speed:%dMbps "
3871 			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
3872 			"Logical speed:%dMbps Fault:%d\n",
3873 			phba->sli4_hba.link_state.speed,
3874 			phba->sli4_hba.link_state.duplex,
3875 			phba->sli4_hba.link_state.status,
3876 			phba->sli4_hba.link_state.type,
3877 			phba->sli4_hba.link_state.number,
3878 			phba->sli4_hba.link_state.logical_speed,
3879 			phba->sli4_hba.link_state.fault);
3880 	/*
3881 	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
3882 	 * topology info. Note: Optional for non FC-AL ports.
3883 	 */
3884 	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3885 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3886 		if (rc == MBX_NOT_FINISHED)
3887 			goto out_free_dmabuf;
3888 		return;
3889 	}
3890 	/*
3891 	 * For FCoE Mode: fill in all the topology information we need and call
3892 	 * the READ_TOPOLOGY completion routine to continue without actually
3893 	 * sending the READ_TOPOLOGY mailbox command to the port.
3894 	 */
3895 	/* Parse and translate status field */
3896 	mb = &pmb->u.mb;
3897 	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
3898
3899 	/* Parse and translate link attention fields */
3900 	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
3901 	la->eventTag = acqe_link->event_tag;
3902 	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
3903 	bf_set(lpfc_mbx_read_top_link_spd, la,
3904 	       lpfc_sli4_parse_latt_link_speed(phba, acqe_link));
3905
3906 	/* Fake the following irrelevant fields */
3907 	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
3908 	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
3909 	bf_set(lpfc_mbx_read_top_il, la, 0);
3910 	bf_set(lpfc_mbx_read_top_pb, la, 0);
3911 	bf_set(lpfc_mbx_read_top_fa, la, 0);
3912 	bf_set(lpfc_mbx_read_top_mm, la, 0);
3913
3914 	/* Invoke the lpfc_handle_latt mailbox command callback function */
3915 	lpfc_mbx_cmpl_read_topology(phba, pmb);
3916
3917 	return;
3918
3919 out_free_dmabuf:
3920 	kfree(mp);
3921 out_free_pmb:
3922 	mempool_free(pmb, phba->mbox_mem_pool);
3923 }
3924
3925 /**
3926  * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
3927  * @phba: pointer to lpfc hba data structure.
3928  * @acqe_fc: pointer to the async fc completion queue entry.
3929  *
3930  * This routine is to handle the SLI4 asynchronous FC event. It will simply log
3931  * that the event was received and then issue a read_topology mailbox command so
3932  * that the rest of the driver will treat it the same as SLI3.
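 *
 * In sketch form (mirroring the body below): record the link_state
 * fields from the ACQE, block the ELS ring, then build a READ_TOPOLOGY
 * mailbox via lpfc_read_topology() whose completion runs
 * lpfc_mbx_cmpl_read_topology(), the same completion the SLI3
 * link-attention path uses.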
3933  **/
3934 static void
3935 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
3936 {
3937 	struct lpfc_dmabuf *mp;
3938 	LPFC_MBOXQ_t *pmb;
3939 	int rc;
3940
3941 	if (bf_get(lpfc_trailer_type, acqe_fc) !=
3942 	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
3943 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3944 				"2895 Non-FC link event detected (%d)\n",
3945 				bf_get(lpfc_trailer_type, acqe_fc));
3946 		return;
3947 	}
3948 	/* Keep the link status for extra SLI4 state machine reference */
3949 	phba->sli4_hba.link_state.speed =
3950 		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
3951 					   bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
3952 	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
3953 	phba->sli4_hba.link_state.topology =
3954 		bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
3955 	phba->sli4_hba.link_state.status =
3956 		bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
3957 	phba->sli4_hba.link_state.type =
3958 		bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
3959 	phba->sli4_hba.link_state.number =
3960 		bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
3961 	phba->sli4_hba.link_state.fault =
3962 		bf_get(lpfc_acqe_link_fault, acqe_fc);
3963 	phba->sli4_hba.link_state.logical_speed =
3964 		bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
3965 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3966 			"2896 Async FC event - Speed:%dMbps Topology:x%x "
3967 			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
3968 			"%dMbps Fault:%d\n",
3969 			phba->sli4_hba.link_state.speed,
3970 			phba->sli4_hba.link_state.topology,
3971 			phba->sli4_hba.link_state.status,
3972 			phba->sli4_hba.link_state.type,
3973 			phba->sli4_hba.link_state.number,
3974 			phba->sli4_hba.link_state.logical_speed,
3975 			phba->sli4_hba.link_state.fault);
3976 	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3977 	if (!pmb) {
3978 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3979 				"2897 The mboxq allocation failed\n");
3980 		return;
3981 	}
3982 	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3983 	if (!mp) {
3984 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3985 				"2898 The lpfc_dmabuf allocation failed\n");
3986 		goto out_free_pmb;
3987 	}
3988 	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3989 	if (!mp->virt) {
3990 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3991 				"2899 The mbuf allocation failed\n");
3992 		goto out_free_dmabuf;
3993 	}
3994
3995 	/* Cleanup any outstanding ELS commands */
3996 	lpfc_els_flush_all_cmd(phba);
3997
3998 	/* Block ELS IOCBs until we are done processing the link event */
3999 	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
4000
4001 	/* Update link event statistics */
4002 	phba->sli.slistat.link_event++;
4003
4004 	/* Create lpfc_handle_latt mailbox command from link ACQE */
4005 	lpfc_read_topology(phba, pmb, mp);
4006 	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
4007 	pmb->vport = phba->pport;
4008
4009 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4010 	if (rc == MBX_NOT_FINISHED)
4011 		goto out_free_dmabuf;
4012 	return;
4013
4014 out_free_dmabuf:
4015 	kfree(mp);
4016 out_free_pmb:
4017 	mempool_free(pmb, phba->mbox_mem_pool);
4018 }
4019
4020 /**
4021  * lpfc_sli4_async_sli_evt - Process the asynchronous SLI event
4022  * @phba: pointer to lpfc hba data structure.
4023  * @acqe_sli: pointer to the async SLI completion queue entry.
4024  *
4025  * This routine is to handle the SLI4 asynchronous SLI events.
4026 **/ 4027 static void 4028 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 4029 { 4030 char port_name; 4031 char message[128]; 4032 uint8_t status; 4033 struct lpfc_acqe_misconfigured_event *misconfigured; 4034 4035 /* special case misconfigured event as it contains data for all ports */ 4036 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 4037 LPFC_SLI_INTF_IF_TYPE_2) || 4038 (bf_get(lpfc_trailer_type, acqe_sli) != 4039 LPFC_SLI_EVENT_TYPE_MISCONFIGURED)) { 4040 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4041 "2901 Async SLI event - Event Data1:x%08x Event Data2:" 4042 "x%08x SLI Event Type:%d\n", 4043 acqe_sli->event_data1, acqe_sli->event_data2, 4044 bf_get(lpfc_trailer_type, acqe_sli)); 4045 return; 4046 } 4047 4048 port_name = phba->Port[0]; 4049 if (port_name == 0x00) 4050 port_name = '?'; /* get port name is empty */ 4051 4052 misconfigured = (struct lpfc_acqe_misconfigured_event *) 4053 &acqe_sli->event_data1; 4054 4055 /* fetch the status for this port */ 4056 switch (phba->sli4_hba.lnk_info.lnk_no) { 4057 case LPFC_LINK_NUMBER_0: 4058 status = bf_get(lpfc_sli_misconfigured_port0, 4059 &misconfigured->theEvent); 4060 break; 4061 case LPFC_LINK_NUMBER_1: 4062 status = bf_get(lpfc_sli_misconfigured_port1, 4063 &misconfigured->theEvent); 4064 break; 4065 case LPFC_LINK_NUMBER_2: 4066 status = bf_get(lpfc_sli_misconfigured_port2, 4067 &misconfigured->theEvent); 4068 break; 4069 case LPFC_LINK_NUMBER_3: 4070 status = bf_get(lpfc_sli_misconfigured_port3, 4071 &misconfigured->theEvent); 4072 break; 4073 default: 4074 status = ~LPFC_SLI_EVENT_STATUS_VALID; 4075 break; 4076 } 4077 4078 switch (status) { 4079 case LPFC_SLI_EVENT_STATUS_VALID: 4080 return; /* no message if the sfp is okay */ 4081 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: 4082 sprintf(message, "Optics faulted/incorrectly installed/not " \ 4083 "installed - Reseat optics, if issue not " 4084 "resolved, replace."); 4085 break; 4086 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: 4087 sprintf(message, 4088 "Optics of two types installed - Remove one optic or " \ 4089 "install matching pair of optics."); 4090 break; 4091 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: 4092 sprintf(message, "Incompatible optics - Replace with " \ 4093 "compatible optics for card to function."); 4094 break; 4095 default: 4096 /* firmware is reporting a status we don't know about */ 4097 sprintf(message, "Unknown event status x%02x", status); 4098 break; 4099 } 4100 4101 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4102 "3176 Misconfigured Physical Port - " 4103 "Port Name %c %s\n", port_name, message); 4104 } 4105 4106 /** 4107 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 4108 * @vport: pointer to vport data structure. 4109 * 4110 * This routine is to perform Clear Virtual Link (CVL) on a vport in 4111 * response to a CVL event. 4112 * 4113 * Return the pointer to the ndlp with the vport if successful, otherwise 4114 * return NULL. 
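 *
 * Illustrative caller pattern (as in the FIP CVL handling further
 * below), where a NULL return means there is no CVL work to do for
 * this vport:
 *
 *   ndlp = lpfc_sli4_perform_vport_cvl(vport);
 *   if (!ndlp)
 *           break;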
4115  **/
4116 static struct lpfc_nodelist *
4117 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
4118 {
4119 	struct lpfc_nodelist *ndlp;
4120 	struct Scsi_Host *shost;
4121 	struct lpfc_hba *phba;
4122
4123 	if (!vport)
4124 		return NULL;
4125 	phba = vport->phba;
4126 	if (!phba)
4127 		return NULL;
4128 	ndlp = lpfc_findnode_did(vport, Fabric_DID);
4129 	if (!ndlp) {
4130 		/* Cannot find existing Fabric ndlp, so allocate a new one */
4131 		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
4132 		if (!ndlp)
4133 			return NULL;
4134 		lpfc_nlp_init(vport, ndlp, Fabric_DID);
4135 		/* Set the node type */
4136 		ndlp->nlp_type |= NLP_FABRIC;
4137 		/* Put ndlp onto node list */
4138 		lpfc_enqueue_node(vport, ndlp);
4139 	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
4140 		/* re-setup ndlp without removing from node list */
4141 		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
4142 		if (!ndlp)
4143 			return NULL;
4144 	}
4145 	if ((phba->pport->port_state < LPFC_FLOGI) &&
4146 	    (phba->pport->port_state != LPFC_VPORT_FAILED))
4147 		return NULL;
4148 	/* If virtual link is not yet instantiated ignore CVL */
4149 	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
4150 	    && (vport->port_state != LPFC_VPORT_FAILED))
4151 		return NULL;
4152 	shost = lpfc_shost_from_vport(vport);
4153 	if (!shost)
4154 		return NULL;
4155 	lpfc_linkdown_port(vport);
4156 	lpfc_cleanup_pending_mbox(vport);
4157 	spin_lock_irq(shost->host_lock);
4158 	vport->fc_flag |= FC_VPORT_CVL_RCVD;
4159 	spin_unlock_irq(shost->host_lock);
4160
4161 	return ndlp;
4162 }
4163
4164 /**
4165  * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
4166  * @phba: pointer to lpfc hba data structure.
4167  *
4168  * This routine is to perform Clear Virtual Link (CVL) on all vports in
4169  * response to a FCF dead event.
4170  **/
4171 static void
4172 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
4173 {
4174 	struct lpfc_vport **vports;
4175 	int i;
4176
4177 	vports = lpfc_create_vport_work_array(phba);
4178 	if (vports)
4179 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
4180 			lpfc_sli4_perform_vport_cvl(vports[i]);
4181 	lpfc_destroy_vport_work_array(phba, vports);
4182 }
4183
4184 /**
4185  * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
4186  * @phba: pointer to lpfc hba data structure.
4187  * @acqe_fip: pointer to the async fcoe completion queue entry.
4188  *
4189  * This routine is to handle the SLI4 asynchronous FCoE FIP event.
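 *
 * The FIP trailer type selects the handling in the switch below,
 * roughly:
 *   NEW_FCF / FCF_PARAM_MOD -> read the FCF record, possibly rescan
 *   FCF_TABLE_FULL          -> log only
 *   FCF_DEAD                -> fast FCF failover, or CVL on all vports
 *   CVL                     -> FDISC re-instantiate or FCF rediscovery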
4190 **/ 4191 static void 4192 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 4193 struct lpfc_acqe_fip *acqe_fip) 4194 { 4195 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 4196 int rc; 4197 struct lpfc_vport *vport; 4198 struct lpfc_nodelist *ndlp; 4199 struct Scsi_Host *shost; 4200 int active_vlink_present; 4201 struct lpfc_vport **vports; 4202 int i; 4203 4204 phba->fc_eventTag = acqe_fip->event_tag; 4205 phba->fcoe_eventtag = acqe_fip->event_tag; 4206 switch (event_type) { 4207 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 4208 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 4209 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 4210 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 4211 LOG_DISCOVERY, 4212 "2546 New FCF event, evt_tag:x%x, " 4213 "index:x%x\n", 4214 acqe_fip->event_tag, 4215 acqe_fip->index); 4216 else 4217 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 4218 LOG_DISCOVERY, 4219 "2788 FCF param modified event, " 4220 "evt_tag:x%x, index:x%x\n", 4221 acqe_fip->event_tag, 4222 acqe_fip->index); 4223 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4224 /* 4225 * During period of FCF discovery, read the FCF 4226 * table record indexed by the event to update 4227 * FCF roundrobin failover eligible FCF bmask. 4228 */ 4229 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 4230 LOG_DISCOVERY, 4231 "2779 Read FCF (x%x) for updating " 4232 "roundrobin FCF failover bmask\n", 4233 acqe_fip->index); 4234 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 4235 } 4236 4237 /* If the FCF discovery is in progress, do nothing. */ 4238 spin_lock_irq(&phba->hbalock); 4239 if (phba->hba_flag & FCF_TS_INPROG) { 4240 spin_unlock_irq(&phba->hbalock); 4241 break; 4242 } 4243 /* If fast FCF failover rescan event is pending, do nothing */ 4244 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { 4245 spin_unlock_irq(&phba->hbalock); 4246 break; 4247 } 4248 4249 /* If the FCF has been in discovered state, do nothing. */ 4250 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 4251 spin_unlock_irq(&phba->hbalock); 4252 break; 4253 } 4254 spin_unlock_irq(&phba->hbalock); 4255 4256 /* Otherwise, scan the entire FCF table and re-discover SAN */ 4257 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 4258 "2770 Start FCF table scan per async FCF " 4259 "event, evt_tag:x%x, index:x%x\n", 4260 acqe_fip->event_tag, acqe_fip->index); 4261 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 4262 LPFC_FCOE_FCF_GET_FIRST); 4263 if (rc) 4264 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 4265 "2547 Issue FCF scan read FCF mailbox " 4266 "command failed (x%x)\n", rc); 4267 break; 4268 4269 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 4270 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4271 "2548 FCF Table full count 0x%x tag 0x%x\n", 4272 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 4273 acqe_fip->event_tag); 4274 break; 4275 4276 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 4277 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 4278 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 4279 "2549 FCF (x%x) disconnected from network, " 4280 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); 4281 /* 4282 * If we are in the middle of FCF failover process, clear 4283 * the corresponding FCF bit in the roundrobin bitmap. 
4284 		 */
4285 		spin_lock_irq(&phba->hbalock);
4286 		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
4287 			spin_unlock_irq(&phba->hbalock);
4288 			/* Update FLOGI FCF failover eligible FCF bmask */
4289 			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
4290 			break;
4291 		}
4292 		spin_unlock_irq(&phba->hbalock);
4293
4294 		/* If the event is not for the currently used FCF, do nothing */
4295 		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
4296 			break;
4297
4298 		/*
4299 		 * Otherwise, request the port to rediscover the entire FCF
4300 		 * table for a fast recovery from the case that the current FCF
4301 		 * is no longer valid, as we are not in the middle of the FCF
4302 		 * failover process already.
4303 		 */
4304 		spin_lock_irq(&phba->hbalock);
4305 		/* Mark the fast failover process in progress */
4306 		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
4307 		spin_unlock_irq(&phba->hbalock);
4308
4309 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
4310 				"2771 Start FCF fast failover process due to "
4311 				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
4312 				"\n", acqe_fip->event_tag, acqe_fip->index);
4313 		rc = lpfc_sli4_redisc_fcf_table(phba);
4314 		if (rc) {
4315 			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
4316 					LOG_DISCOVERY,
4317 					"2772 Issue FCF rediscover mailbox "
4318 					"command failed, fail through to FCF "
4319 					"dead event\n");
4320 			spin_lock_irq(&phba->hbalock);
4321 			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
4322 			spin_unlock_irq(&phba->hbalock);
4323 			/*
4324 			 * Last resort will fail over by treating this
4325 			 * as a link down to FCF registration.
4326 			 */
4327 			lpfc_sli4_fcf_dead_failthrough(phba);
4328 		} else {
4329 			/* Reset FCF roundrobin bmask for new discovery */
4330 			lpfc_sli4_clear_fcf_rr_bmask(phba);
4331 			/*
4332 			 * Handling fast FCF failover to a DEAD FCF event is
4333 			 * considered equivalent to receiving CVL to all vports.
4334 			 */
4335 			lpfc_sli4_perform_all_vport_cvl(phba);
4336 		}
4337 		break;
4338 	case LPFC_FIP_EVENT_TYPE_CVL:
4339 		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
4340 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4341 				"2718 Clear Virtual Link Received for VPI 0x%x"
4342 				" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
4343
4344 		vport = lpfc_find_vport_by_vpid(phba,
4345 						acqe_fip->index);
4346 		ndlp = lpfc_sli4_perform_vport_cvl(vport);
4347 		if (!ndlp)
4348 			break;
4349 		active_vlink_present = 0;
4350
4351 		vports = lpfc_create_vport_work_array(phba);
4352 		if (vports) {
4353 			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
4354 			     i++) {
4355 				if ((!(vports[i]->fc_flag &
4356 				       FC_VPORT_CVL_RCVD)) &&
4357 				    (vports[i]->port_state > LPFC_FDISC)) {
4358 					active_vlink_present = 1;
4359 					break;
4360 				}
4361 			}
4362 			lpfc_destroy_vport_work_array(phba, vports);
4363 		}
4364
4365 		if (active_vlink_present) {
4366 			/*
4367 			 * If there are other active VLinks present,
4368 			 * re-instantiate the Vlink using FDISC.
4369 			 */
4370 			mod_timer(&ndlp->nlp_delayfunc,
4371 				  jiffies + msecs_to_jiffies(1000));
4372 			shost = lpfc_shost_from_vport(vport);
4373 			spin_lock_irq(shost->host_lock);
4374 			ndlp->nlp_flag |= NLP_DELAY_TMO;
4375 			spin_unlock_irq(shost->host_lock);
4376 			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
4377 			vport->port_state = LPFC_FDISC;
4378 		} else {
4379 			/*
4380 			 * Otherwise, we request port to rediscover
4381 			 * the entire FCF table for a fast recovery
4382 			 * from possible case that the current FCF
4383 			 * is no longer valid if we are not already
4384 			 * in the FCF failover process.
4385 			 */
4386 			spin_lock_irq(&phba->hbalock);
4387 			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
4388 				spin_unlock_irq(&phba->hbalock);
4389 				break;
4390 			}
4391 			/* Mark the fast failover process in progress */
4392 			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
4393 			spin_unlock_irq(&phba->hbalock);
4394 			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
4395 					LOG_DISCOVERY,
4396 					"2773 Start FCF failover per CVL, "
4397 					"evt_tag:x%x\n", acqe_fip->event_tag);
4398 			rc = lpfc_sli4_redisc_fcf_table(phba);
4399 			if (rc) {
4400 				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
4401 						LOG_DISCOVERY,
4402 						"2774 Issue FCF rediscover "
4403 						"mailbox command failed, "
4404 						"through to CVL event\n");
4405 				spin_lock_irq(&phba->hbalock);
4406 				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
4407 				spin_unlock_irq(&phba->hbalock);
4408 				/*
4409 				 * Last resort will be to retry on
4410 				 * the currently registered FCF entry.
4411 				 */
4412 				lpfc_retry_pport_discovery(phba);
4413 			} else
4414 				/*
4415 				 * Reset FCF roundrobin bmask for new
4416 				 * discovery.
4417 				 */
4418 				lpfc_sli4_clear_fcf_rr_bmask(phba);
4419 		}
4420 		break;
4421 	default:
4422 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4423 				"0288 Unknown FCoE event type 0x%x event tag "
4424 				"0x%x\n", event_type, acqe_fip->event_tag);
4425 		break;
4426 	}
4427 }
4428
4429 /**
4430  * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
4431  * @phba: pointer to lpfc hba data structure.
4432  * @acqe_dcbx: pointer to the async dcbx completion queue entry.
4433  *
4434  * This routine is to handle the SLI4 asynchronous dcbx event.
4435  **/
4436 static void
4437 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
4438 			 struct lpfc_acqe_dcbx *acqe_dcbx)
4439 {
4440 	phba->fc_eventTag = acqe_dcbx->event_tag;
4441 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4442 			"0290 The SLI4 DCBX asynchronous event is not "
4443 			"handled yet\n");
4444 }
4445
4446 /**
4447  * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
4448  * @phba: pointer to lpfc hba data structure.
4449  * @acqe_grp5: pointer to the async grp5 completion queue entry.
4450  *
4451  * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
4452  * is an asynchronous notification of a logical link speed change. The Port
4453  * reports the logical link speed in units of 10Mbps.
4454  **/
4455 static void
4456 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
4457 			 struct lpfc_acqe_grp5 *acqe_grp5)
4458 {
4459 	uint16_t prev_ll_spd;
4460
4461 	phba->fc_eventTag = acqe_grp5->event_tag;
4462 	phba->fcoe_eventtag = acqe_grp5->event_tag;
4463 	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
4464 	phba->sli4_hba.link_state.logical_speed =
4465 		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
4466 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4467 			"2789 GRP5 Async Event: Updating logical link speed "
4468 			"from %dMbps to %dMbps\n", prev_ll_spd,
4469 			phba->sli4_hba.link_state.logical_speed);
4470 }
4471
4472 /**
4473  * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
4474  * @phba: pointer to lpfc hba data structure.
4475  *
4476  * This routine is invoked by the worker thread to process all the pending
4477  * SLI4 asynchronous events.
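 *
 * Each queued ACQE is dispatched on its MCQE trailer code, per the
 * switch below: LINK, FCOE (FIP), DCBX, GRP5, FC and SLI events go to
 * their respective lpfc_sli4_async_*_evt() handlers; unknown codes are
 * logged and dropped.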
4478  **/
4479 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
4480 {
4481 	struct lpfc_cq_event *cq_event;
4482
4483 	/* First, declare the async event has been handled */
4484 	spin_lock_irq(&phba->hbalock);
4485 	phba->hba_flag &= ~ASYNC_EVENT;
4486 	spin_unlock_irq(&phba->hbalock);
4487 	/* Now, handle all the async events */
4488 	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
4489 		/* Get the first event from the head of the event queue */
4490 		spin_lock_irq(&phba->hbalock);
4491 		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
4492 				 cq_event, struct lpfc_cq_event, list);
4493 		spin_unlock_irq(&phba->hbalock);
4494 		/* Process the asynchronous event */
4495 		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
4496 		case LPFC_TRAILER_CODE_LINK:
4497 			lpfc_sli4_async_link_evt(phba,
4498 						 &cq_event->cqe.acqe_link);
4499 			break;
4500 		case LPFC_TRAILER_CODE_FCOE:
4501 			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
4502 			break;
4503 		case LPFC_TRAILER_CODE_DCBX:
4504 			lpfc_sli4_async_dcbx_evt(phba,
4505 						 &cq_event->cqe.acqe_dcbx);
4506 			break;
4507 		case LPFC_TRAILER_CODE_GRP5:
4508 			lpfc_sli4_async_grp5_evt(phba,
4509 						 &cq_event->cqe.acqe_grp5);
4510 			break;
4511 		case LPFC_TRAILER_CODE_FC:
4512 			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
4513 			break;
4514 		case LPFC_TRAILER_CODE_SLI:
4515 			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
4516 			break;
4517 		default:
4518 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4519 					"1804 Invalid asynchronous event code: "
4520 					"x%x\n", bf_get(lpfc_trailer_code,
4521 					&cq_event->cqe.mcqe_cmpl));
4522 			break;
4523 		}
4524 		/* Free the completion event processed to the free pool */
4525 		lpfc_sli4_cq_event_release(phba, cq_event);
4526 	}
4527 }
4528
4529 /**
4530  * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
4531  * @phba: pointer to lpfc hba data structure.
4532  *
4533  * This routine is invoked by the worker thread to process FCF table
4534  * rediscovery pending completion event.
4535  **/
4536 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
4537 {
4538 	int rc;
4539
4540 	spin_lock_irq(&phba->hbalock);
4541 	/* Clear FCF rediscovery timeout event */
4542 	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
4543 	/* Clear driver fast failover FCF record flag */
4544 	phba->fcf.failover_rec.flag = 0;
4545 	/* Set state for FCF fast failover */
4546 	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
4547 	spin_unlock_irq(&phba->hbalock);
4548
4549 	/* Scan FCF table from the first entry to re-discover SAN */
4550 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
4551 			"2777 Start post-quiescent FCF table scan\n");
4552 	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
4553 	if (rc)
4554 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4555 				"2747 Issue FCF scan read FCF mailbox "
4556 				"command failed 0x%x\n", rc);
4557 }
4558
4559 /**
4560  * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
4561  * @phba: pointer to lpfc hba data structure.
4562  * @dev_grp: The HBA PCI-Device group number.
4563  *
4564  * This routine is invoked to set up the per HBA PCI-Device group function
4565  * API jump table entries.
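 *
 * For example, per the body below, dev_grp == LPFC_PCI_DEV_OC first
 * forces phba->sli_rev to LPFC_SLI_REV4 and then populates the INIT,
 * SCSI, SLI and MBOX jump tables with their SLI4 variants.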
4566  *
4567  * Return: 0 if success, otherwise -ENODEV
4568  **/
4569 int
4570 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4571 {
4572 	int rc;
4573
4574 	/* Set up lpfc PCI-device group */
4575 	phba->pci_dev_grp = dev_grp;
4576
4577 	/* The LPFC_PCI_DEV_OC uses SLI4 */
4578 	if (dev_grp == LPFC_PCI_DEV_OC)
4579 		phba->sli_rev = LPFC_SLI_REV4;
4580
4581 	/* Set up device INIT API function jump table */
4582 	rc = lpfc_init_api_table_setup(phba, dev_grp);
4583 	if (rc)
4584 		return -ENODEV;
4585 	/* Set up SCSI API function jump table */
4586 	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
4587 	if (rc)
4588 		return -ENODEV;
4589 	/* Set up SLI API function jump table */
4590 	rc = lpfc_sli_api_table_setup(phba, dev_grp);
4591 	if (rc)
4592 		return -ENODEV;
4593 	/* Set up MBOX API function jump table */
4594 	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
4595 	if (rc)
4596 		return -ENODEV;
4597
4598 	return 0;
4599 }
4600
4601 /**
4602  * lpfc_log_intr_mode - Log the active interrupt mode
4603  * @phba: pointer to lpfc hba data structure.
4604  * @intr_mode: active interrupt mode adopted.
4605  *
4606  * This routine is invoked to log the currently used active interrupt mode
4607  * of the device.
4608  **/
4609 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
4610 {
4611 	switch (intr_mode) {
4612 	case 0:
4613 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4614 				"0470 Enabled INTx interrupt mode.\n");
4615 		break;
4616 	case 1:
4617 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4618 				"0481 Enabled MSI interrupt mode.\n");
4619 		break;
4620 	case 2:
4621 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4622 				"0480 Enabled MSI-X interrupt mode.\n");
4623 		break;
4624 	default:
4625 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4626 				"0482 Illegal interrupt mode.\n");
4627 		break;
4628 	}
4629 	return;
4630 }
4631
4632 /**
4633  * lpfc_enable_pci_dev - Enable a generic PCI device.
4634  * @phba: pointer to lpfc hba data structure.
4635  *
4636  * This routine is invoked to enable the PCI device that is common to all
4637  * PCI devices.
4638  *
4639  * Return codes
4640  * 	0 - successful
4641  * 	other values - error
4642  **/
4643 static int
4644 lpfc_enable_pci_dev(struct lpfc_hba *phba)
4645 {
4646 	struct pci_dev *pdev;
4647 	int bars = 0;
4648
4649 	/* Obtain PCI device reference */
4650 	if (!phba->pcidev)
4651 		goto out_error;
4652 	else
4653 		pdev = phba->pcidev;
4654 	/* Select PCI BARs */
4655 	bars = pci_select_bars(pdev, IORESOURCE_MEM);
4656 	/* Enable PCI device */
4657 	if (pci_enable_device_mem(pdev))
4658 		goto out_error;
4659 	/* Request PCI resource for the device */
4660 	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
4661 		goto out_disable_device;
4662 	/* Set up device as PCI master and save state for EEH */
4663 	pci_set_master(pdev);
4664 	pci_try_set_mwi(pdev);
4665 	pci_save_state(pdev);
4666
4667 	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
4668 	if (pci_is_pcie(pdev))
4669 		pdev->needs_freset = 1;
4670
4671 	return 0;
4672
4673 out_disable_device:
4674 	pci_disable_device(pdev);
4675 out_error:
4676 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4677 			"1401 Failed to enable pci device, bars:x%x\n", bars);
4678 	return -ENODEV;
4679 }
4680
4681 /**
4682  * lpfc_disable_pci_dev - Disable a generic PCI device.
4683  * @phba: pointer to lpfc hba data structure.
4684  *
4685  * This routine is invoked to disable the PCI device that is common to all
4686  * PCI devices.
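 *
 * This undoes lpfc_enable_pci_dev(): it re-selects the same
 * IORESOURCE_MEM BARs, releases those regions and disables the device.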
4687  **/
4688 static void
4689 lpfc_disable_pci_dev(struct lpfc_hba *phba)
4690 {
4691 	struct pci_dev *pdev;
4692 	int bars;
4693
4694 	/* Obtain PCI device reference */
4695 	if (!phba->pcidev)
4696 		return;
4697 	else
4698 		pdev = phba->pcidev;
4699 	/* Select PCI BARs */
4700 	bars = pci_select_bars(pdev, IORESOURCE_MEM);
4701 	/* Release PCI resource and disable PCI device */
4702 	pci_release_selected_regions(pdev, bars);
4703 	pci_disable_device(pdev);
4704
4705 	return;
4706 }
4707
4708 /**
4709  * lpfc_reset_hba - Reset a hba
4710  * @phba: pointer to lpfc hba data structure.
4711  *
4712  * This routine is invoked to reset a hba device. It brings the HBA
4713  * offline, performs a board restart, and then brings the board back
4714  * online. The lpfc_offline calls lpfc_sli_hba_down, which will clean up
4715  * outstanding mailbox commands.
4716  **/
4717 void
4718 lpfc_reset_hba(struct lpfc_hba *phba)
4719 {
4720 	/* If resets are disabled then set error state and return. */
4721 	if (!phba->cfg_enable_hba_reset) {
4722 		phba->link_state = LPFC_HBA_ERROR;
4723 		return;
4724 	}
4725 	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
4726 		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
4727 	else
4728 		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
4729 	lpfc_offline(phba);
4730 	lpfc_sli_brdrestart(phba);
4731 	lpfc_online(phba);
4732 	lpfc_unblock_mgmt_io(phba);
4733 }
4734
4735 /**
4736  * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
4737  * @phba: pointer to lpfc hba data structure.
4738  *
4739  * This function reads the PCI SR-IOV extended capability of the physical
4740  * function to obtain the total number of virtual functions that the
4741  * device supports.
4742  *
4743  * Return: the number of supported virtual functions, or 0 when SR-IOV is not supported.
4744  **/
4745 uint16_t
4746 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
4747 {
4748 	struct pci_dev *pdev = phba->pcidev;
4749 	uint16_t nr_virtfn;
4750 	int pos;
4751
4752 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4753 	if (pos == 0)
4754 		return 0;
4755
4756 	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
4757 	return nr_virtfn;
4758 }
4759
4760 /**
4761  * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
4762  * @phba: pointer to lpfc hba data structure.
4763  * @nr_vfn: number of virtual functions to be enabled.
4764  *
4765  * This function enables the PCI SR-IOV virtual functions to a physical
4766  * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
4767  * enable the number of virtual functions to the physical function. As
4768  * not all devices support SR-IOV, the return code from the pci_enable_sriov()
4769  * API call is not considered an error condition for most devices.
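 *
 * Typical use, as in the resource-setup paths later in this file:
 *
 *   if (phba->cfg_sriov_nr_virtfn > 0) {
 *           rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
 *                                               phba->cfg_sriov_nr_virtfn);
 *           if (rc)
 *                   phba->cfg_sriov_nr_virtfn = 0;
 *   }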
4770  **/
4771 int
4772 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
4773 {
4774 	struct pci_dev *pdev = phba->pcidev;
4775 	uint16_t max_nr_vfn;
4776 	int rc;
4777
4778 	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
4779 	if (nr_vfn > max_nr_vfn) {
4780 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4781 				"3057 Requested vfs (%d) greater than "
4782 				"supported vfs (%d)", nr_vfn, max_nr_vfn);
4783 		return -EINVAL;
4784 	}
4785
4786 	rc = pci_enable_sriov(pdev, nr_vfn);
4787 	if (rc) {
4788 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4789 				"2806 Failed to enable sriov on this device "
4790 				"with vfn number nr_vf:%d, rc:%d\n",
4791 				nr_vfn, rc);
4792 	} else
4793 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4794 				"2807 Successfully enabled sriov on this device "
4795 				"with vfn number nr_vf:%d\n", nr_vfn);
4796 	return rc;
4797 }
4798
4799 /**
4800  * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
4801  * @phba: pointer to lpfc hba data structure.
4802  *
4803  * This routine is invoked to set up the driver internal resources specific to
4804  * support the SLI-3 HBA device it attached to.
4805  *
4806  * Return codes
4807  *	0 - successful
4808  *	other values - error
4809  **/
4810 static int
4811 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4812 {
4813 	struct lpfc_sli *psli;
4814 	int rc;
4815
4816 	/*
4817 	 * Initialize timers used by driver
4818 	 */
4819
4820 	/* Heartbeat timer */
4821 	init_timer(&phba->hb_tmofunc);
4822 	phba->hb_tmofunc.function = lpfc_hb_timeout;
4823 	phba->hb_tmofunc.data = (unsigned long)phba;
4824
4825 	psli = &phba->sli;
4826 	/* MBOX heartbeat timer */
4827 	init_timer(&psli->mbox_tmo);
4828 	psli->mbox_tmo.function = lpfc_mbox_timeout;
4829 	psli->mbox_tmo.data = (unsigned long) phba;
4830 	/* FCP polling mode timer */
4831 	init_timer(&phba->fcp_poll_timer);
4832 	phba->fcp_poll_timer.function = lpfc_poll_timeout;
4833 	phba->fcp_poll_timer.data = (unsigned long) phba;
4834 	/* Fabric block timer */
4835 	init_timer(&phba->fabric_block_timer);
4836 	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4837 	phba->fabric_block_timer.data = (unsigned long) phba;
4838 	/* EA polling mode timer */
4839 	init_timer(&phba->eratt_poll);
4840 	phba->eratt_poll.function = lpfc_poll_eratt;
4841 	phba->eratt_poll.data = (unsigned long) phba;
4842
4843 	/* Host attention work mask setup */
4844 	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
4845 	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
4846
4847 	/* Get all the module params for configuring this host */
4848 	lpfc_get_cfgparam(phba);
4849 	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
4850 		phba->menlo_flag |= HBA_MENLO_SUPPORT;
4851 		/* check for menlo minimum sg count */
4852 		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
4853 			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
4854 	}
4855
4856 	if (!phba->sli.ring)
4857 		phba->sli.ring = (struct lpfc_sli_ring *)
4858 			kzalloc(LPFC_SLI3_MAX_RING *
4859 				sizeof(struct lpfc_sli_ring), GFP_KERNEL);
4860 	if (!phba->sli.ring)
4861 		return -ENOMEM;
4862
4863 	/*
4864 	 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
4865 	 * used to create the sg_dma_buf_pool must be dynamically calculated.
4866 	 */
4867
4868 	/* Initialize the host templates with the configured values. */
4869 	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4870 	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4871
4872 	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
4873 	if (phba->cfg_enable_bg) {
4874 		/*
4875 		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
4876 		 * the FCP rsp, and a BDE for each. Since we have no control
4877 		 * over how many protection data segments the SCSI Layer
4878 		 * will hand us (ie: there could be one for every block
4879 		 * in the IO), we just allocate enough BDEs to accommodate
4880 		 * our max amount and we need to limit lpfc_sg_seg_cnt to
4881 		 * minimize the risk of running out.
4882 		 */
4883 		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4884 			sizeof(struct fcp_rsp) +
4885 			(LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));
4886
4887 		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
4888 			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
4889
4890 		/* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
4891 		phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
4892 	} else {
4893 		/*
4894 		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
4895 		 * the FCP rsp, a BDE for each, and a BDE for up to
4896 		 * cfg_sg_seg_cnt data segments.
4897 		 */
4898 		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4899 			sizeof(struct fcp_rsp) +
4900 			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
4901
4902 		/* Total BDEs in BPL for scsi_sg_list */
4903 		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
4904 	}
4905
4906 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4907 			"9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
4908 			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
4909 			phba->cfg_total_seg_cnt);
4910
4911 	phba->max_vpi = LPFC_MAX_VPI;
4912 	/* This will be set to correct value after config_port mbox */
4913 	phba->max_vports = 0;
4914
4915 	/*
4916 	 * Initialize the SLI Layer to run with lpfc HBAs.
4917 	 */
4918 	lpfc_sli_setup(phba);
4919 	lpfc_sli_queue_setup(phba);
4920
4921 	/* Allocate device driver memory */
4922 	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
4923 		return -ENOMEM;
4924
4925 	/*
4926 	 * Enable sr-iov virtual functions if supported and configured
4927 	 * through the module parameter.
4928 	 */
4929 	if (phba->cfg_sriov_nr_virtfn > 0) {
4930 		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4931 						    phba->cfg_sriov_nr_virtfn);
4932 		if (rc) {
4933 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4934 					"2808 Requested number of SR-IOV "
4935 					"virtual functions (%d) is not "
4936 					"supported\n",
4937 					phba->cfg_sriov_nr_virtfn);
4938 			phba->cfg_sriov_nr_virtfn = 0;
4939 		}
4940 	}
4941
4942 	return 0;
4943 }
4944
4945 /**
4946  * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
4947  * @phba: pointer to lpfc hba data structure.
4948  *
4949  * This routine is invoked to unset the driver internal resources set up
4950  * specific for supporting the SLI-3 HBA device it attached to.
4951  **/
4952 static void
4953 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
4954 {
4955 	/* Free device driver memory allocated */
4956 	lpfc_mem_free_all(phba);
4957
4958 	return;
4959 }
4960
4961 /**
4962  * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
4963  * @phba: pointer to lpfc hba data structure.
4964  *
4965  * This routine is invoked to set up the driver internal resources specific to
4966  * support the SLI-4 HBA device it attached to.
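 *
 * In outline, the body below proceeds: POST status check, driver
 * timers, SGL/DMA buffer sizing, bootstrap mailbox, endian order,
 * READ_CONFIG, queue verification, CQ event pool, sgl lists, RPI
 * headers, FCF round-robin bmask, per-EQ handles, MSI-X entries and
 * the CPU-to-IO-channel map; failures unwind in reverse order through
 * the out_* labels.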
4967 * 4968 * Return codes 4969 * 0 - successful 4970 * other values - error 4971 **/ 4972 static int 4973 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 4974 { 4975 struct lpfc_vector_map_info *cpup; 4976 struct lpfc_sli *psli; 4977 LPFC_MBOXQ_t *mboxq; 4978 int rc, i, hbq_count, max_buf_size; 4979 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 4980 struct lpfc_mqe *mqe; 4981 int longs; 4982 int fof_vectors = 0; 4983 4984 /* Get all the module params for configuring this host */ 4985 lpfc_get_cfgparam(phba); 4986 4987 /* Before proceed, wait for POST done and device ready */ 4988 rc = lpfc_sli4_post_status_check(phba); 4989 if (rc) 4990 return -ENODEV; 4991 4992 /* 4993 * Initialize timers used by driver 4994 */ 4995 4996 /* Heartbeat timer */ 4997 init_timer(&phba->hb_tmofunc); 4998 phba->hb_tmofunc.function = lpfc_hb_timeout; 4999 phba->hb_tmofunc.data = (unsigned long)phba; 5000 init_timer(&phba->rrq_tmr); 5001 phba->rrq_tmr.function = lpfc_rrq_timeout; 5002 phba->rrq_tmr.data = (unsigned long)phba; 5003 5004 psli = &phba->sli; 5005 /* MBOX heartbeat timer */ 5006 init_timer(&psli->mbox_tmo); 5007 psli->mbox_tmo.function = lpfc_mbox_timeout; 5008 psli->mbox_tmo.data = (unsigned long) phba; 5009 /* Fabric block timer */ 5010 init_timer(&phba->fabric_block_timer); 5011 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 5012 phba->fabric_block_timer.data = (unsigned long) phba; 5013 /* EA polling mode timer */ 5014 init_timer(&phba->eratt_poll); 5015 phba->eratt_poll.function = lpfc_poll_eratt; 5016 phba->eratt_poll.data = (unsigned long) phba; 5017 /* FCF rediscover timer */ 5018 init_timer(&phba->fcf.redisc_wait); 5019 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo; 5020 phba->fcf.redisc_wait.data = (unsigned long)phba; 5021 5022 /* 5023 * Control structure for handling external multi-buffer mailbox 5024 * command pass-through. 5025 */ 5026 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, 5027 sizeof(struct lpfc_mbox_ext_buf_ctx)); 5028 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 5029 5030 phba->max_vpi = LPFC_MAX_VPI; 5031 5032 /* This will be set to correct value after the read_config mbox */ 5033 phba->max_vports = 0; 5034 5035 /* Program the default value of vlan_id and fc_map */ 5036 phba->valid_vlan = 0; 5037 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 5038 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 5039 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 5040 5041 /* 5042 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 5043 * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple. 5044 */ 5045 if (!phba->sli.ring) 5046 phba->sli.ring = kzalloc( 5047 (LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) * 5048 sizeof(struct lpfc_sli_ring), GFP_KERNEL); 5049 if (!phba->sli.ring) 5050 return -ENOMEM; 5051 5052 /* 5053 * It doesn't matter what family our adapter is in, we are 5054 * limited to 2 Pages, 512 SGEs, for our SGL. 5055 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp 5056 */ 5057 max_buf_size = (2 * SLI4_PAGE_SIZE); 5058 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2) 5059 phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2; 5060 5061 /* 5062 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size 5063 * used to create the sg_dma_buf_pool must be dynamically calculated. 5064 */ 5065 5066 if (phba->cfg_enable_bg) { 5067 /* 5068 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, 5069 * the FCP rsp, and a SGE for each. 
Since we have no control
5070 		 * over how many protection data segments the SCSI Layer
5071 		 * will hand us (ie: there could be one for every block
5072 		 * in the IO), we just allocate enough SGEs to accommodate
5073 		 * our max amount and we need to limit lpfc_sg_seg_cnt to
5074 		 * minimize the risk of running out.
5075 		 */
5076 		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5077 			sizeof(struct fcp_rsp) + max_buf_size;
5078
5079 		/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
5080 		phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
5081
5082 		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
5083 			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
5084 	} else {
5085 		/*
5086 		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
5087 		 * the FCP rsp, a SGE for each, and a SGE for up to
5088 		 * cfg_sg_seg_cnt data segments.
5089 		 */
5090 		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5091 			sizeof(struct fcp_rsp) +
5092 			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
5093
5094 		/* Total SGEs for scsi_sg_list */
5095 		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
5096 		/*
5097 		 * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need
5098 		 * to post 1 page for the SGL.
5099 		 */
5100 	}
5101
5102 	/* Initialize the host templates with the updated values. */
5103 	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5104 	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5105
5106 	if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
5107 		phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
5108 	else
5109 		phba->cfg_sg_dma_buf_size =
5110 			SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
5111
5112 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
5113 			"9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
5114 			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
5115 			phba->cfg_total_seg_cnt);
5116
5117 	/* Initialize buffer queue management fields */
5118 	hbq_count = lpfc_sli_hbq_count();
5119 	for (i = 0; i < hbq_count; ++i)
5120 		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
5121 	INIT_LIST_HEAD(&phba->rb_pend_list);
5122 	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
5123 	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
5124
5125 	/*
5126 	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
5127 	 */
5128 	/* Initialize the Abort scsi buffer list used by driver */
5129 	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
5130 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
5131 	/* This abort list used by worker thread */
5132 	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
5133
5134 	/*
5135 	 * Initialize driver internal slow-path work queues
5136 	 */
5137
5138 	/* Driver internal slow-path CQ Event pool */
5139 	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
5140 	/* Response IOCB work queue list */
5141 	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
5142 	/* Asynchronous event CQ Event work queue list */
5143 	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
5144 	/* Fast-path XRI aborted CQ Event work queue list */
5145 	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
5146 	/* Slow-path XRI aborted CQ Event work queue list */
5147 	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
5148 	/* Receive queue CQ Event work queue list */
5149 	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
5150
5151 	/* Initialize extent block lists.
*/ 5152 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); 5153 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); 5154 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); 5155 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); 5156 5157 /* Initialize the driver internal SLI layer lists. */ 5158 lpfc_sli_setup(phba); 5159 lpfc_sli_queue_setup(phba); 5160 5161 /* Allocate device driver memory */ 5162 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 5163 if (rc) 5164 return -ENOMEM; 5165 5166 /* IF Type 2 ports get initialized now. */ 5167 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 5168 LPFC_SLI_INTF_IF_TYPE_2) { 5169 rc = lpfc_pci_function_reset(phba); 5170 if (unlikely(rc)) 5171 return -ENODEV; 5172 } 5173 5174 /* Create the bootstrap mailbox command */ 5175 rc = lpfc_create_bootstrap_mbox(phba); 5176 if (unlikely(rc)) 5177 goto out_free_mem; 5178 5179 /* Set up the host's endian order with the device. */ 5180 rc = lpfc_setup_endian_order(phba); 5181 if (unlikely(rc)) 5182 goto out_free_bsmbx; 5183 5184 /* Set up the hba's configuration parameters. */ 5185 rc = lpfc_sli4_read_config(phba); 5186 if (unlikely(rc)) 5187 goto out_free_bsmbx; 5188 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); 5189 if (unlikely(rc)) 5190 goto out_free_bsmbx; 5191 5192 /* IF Type 0 ports get initialized now. */ 5193 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 5194 LPFC_SLI_INTF_IF_TYPE_0) { 5195 rc = lpfc_pci_function_reset(phba); 5196 if (unlikely(rc)) 5197 goto out_free_bsmbx; 5198 } 5199 5200 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 5201 GFP_KERNEL); 5202 if (!mboxq) { 5203 rc = -ENOMEM; 5204 goto out_free_bsmbx; 5205 } 5206 5207 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */ 5208 lpfc_supported_pages(mboxq); 5209 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5210 if (!rc) { 5211 mqe = &mboxq->u.mqe; 5212 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), 5213 LPFC_MAX_SUPPORTED_PAGES); 5214 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { 5215 switch (pn_page[i]) { 5216 case LPFC_SLI4_PARAMETERS: 5217 phba->sli4_hba.pc_sli4_params.supported = 1; 5218 break; 5219 default: 5220 break; 5221 } 5222 } 5223 /* Read the port's SLI4 Parameters capabilities if supported. */ 5224 if (phba->sli4_hba.pc_sli4_params.supported) 5225 rc = lpfc_pc_sli4_params_get(phba, mboxq); 5226 if (rc) { 5227 mempool_free(mboxq, phba->mbox_mem_pool); 5228 rc = -EIO; 5229 goto out_free_bsmbx; 5230 } 5231 } 5232 /* 5233 * Get sli4 parameters that override parameters from Port capabilities. 5234 * If this call fails, it isn't critical unless the SLI4 parameters come 5235 * back in conflict. 
5236 */ 5237 rc = lpfc_get_sli4_parameters(phba, mboxq); 5238 if (rc) { 5239 if (phba->sli4_hba.extents_in_use && 5240 phba->sli4_hba.rpi_hdrs_in_use) { 5241 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5242 "2999 Unsupported SLI4 Parameters " 5243 "Extents and RPI headers enabled.\n"); 5244 goto out_free_bsmbx; 5245 } 5246 } 5247 mempool_free(mboxq, phba->mbox_mem_pool); 5248 5249 /* Verify OAS is supported */ 5250 lpfc_sli4_oas_verify(phba); 5251 if (phba->cfg_fof) 5252 fof_vectors = 1; 5253 5254 /* Verify all the SLI4 queues */ 5255 rc = lpfc_sli4_queue_verify(phba); 5256 if (rc) 5257 goto out_free_bsmbx; 5258 5259 /* Create driver internal CQE event pool */ 5260 rc = lpfc_sli4_cq_event_pool_create(phba); 5261 if (rc) 5262 goto out_free_bsmbx; 5263 5264 /* Initialize sgl lists per host */ 5265 lpfc_init_sgl_list(phba); 5266 5267 /* Allocate and initialize active sgl array */ 5268 rc = lpfc_init_active_sgl_array(phba); 5269 if (rc) { 5270 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5271 "1430 Failed to initialize sgl list.\n"); 5272 goto out_destroy_cq_event_pool; 5273 } 5274 rc = lpfc_sli4_init_rpi_hdrs(phba); 5275 if (rc) { 5276 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5277 "1432 Failed to initialize rpi headers.\n"); 5278 goto out_free_active_sgl; 5279 } 5280 5281 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 5282 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 5283 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), 5284 GFP_KERNEL); 5285 if (!phba->fcf.fcf_rr_bmask) { 5286 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5287 "2759 Failed allocate memory for FCF round " 5288 "robin failover bmask\n"); 5289 rc = -ENOMEM; 5290 goto out_remove_rpi_hdrs; 5291 } 5292 5293 phba->sli4_hba.fcp_eq_hdl = 5294 kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 5295 (fof_vectors + phba->cfg_fcp_io_channel)), 5296 GFP_KERNEL); 5297 if (!phba->sli4_hba.fcp_eq_hdl) { 5298 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5299 "2572 Failed allocate memory for " 5300 "fast-path per-EQ handle array\n"); 5301 rc = -ENOMEM; 5302 goto out_free_fcf_rr_bmask; 5303 } 5304 5305 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 5306 (fof_vectors + 5307 phba->cfg_fcp_io_channel)), GFP_KERNEL); 5308 if (!phba->sli4_hba.msix_entries) { 5309 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5310 "2573 Failed allocate memory for msi-x " 5311 "interrupt vector entries\n"); 5312 rc = -ENOMEM; 5313 goto out_free_fcp_eq_hdl; 5314 } 5315 5316 phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) * 5317 phba->sli4_hba.num_present_cpu), 5318 GFP_KERNEL); 5319 if (!phba->sli4_hba.cpu_map) { 5320 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5321 "3327 Failed allocate memory for msi-x " 5322 "interrupt vector mapping\n"); 5323 rc = -ENOMEM; 5324 goto out_free_msix; 5325 } 5326 if (lpfc_used_cpu == NULL) { 5327 lpfc_used_cpu = kzalloc((sizeof(uint16_t) * lpfc_present_cpu), 5328 GFP_KERNEL); 5329 if (!lpfc_used_cpu) { 5330 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5331 "3335 Failed allocate memory for msi-x " 5332 "interrupt vector mapping\n"); 5333 kfree(phba->sli4_hba.cpu_map); 5334 rc = -ENOMEM; 5335 goto out_free_msix; 5336 } 5337 for (i = 0; i < lpfc_present_cpu; i++) 5338 lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY; 5339 } 5340 5341 /* Initialize io channels for round robin */ 5342 cpup = phba->sli4_hba.cpu_map; 5343 rc = 0; 5344 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 5345 cpup->channel_id = rc; 5346 rc++; 5347 if (rc >= phba->cfg_fcp_io_channel) 5348 rc 
= 0; 5349 } 5350 5351 /* 5352 * Enable sr-iov virtual functions if supported and configured 5353 * through the module parameter. 5354 */ 5355 if (phba->cfg_sriov_nr_virtfn > 0) { 5356 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 5357 phba->cfg_sriov_nr_virtfn); 5358 if (rc) { 5359 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5360 "3020 Requested number of SR-IOV " 5361 "virtual functions (%d) is not " 5362 "supported\n", 5363 phba->cfg_sriov_nr_virtfn); 5364 phba->cfg_sriov_nr_virtfn = 0; 5365 } 5366 } 5367 5368 return 0; 5369 5370 out_free_msix: 5371 kfree(phba->sli4_hba.msix_entries); 5372 out_free_fcp_eq_hdl: 5373 kfree(phba->sli4_hba.fcp_eq_hdl); 5374 out_free_fcf_rr_bmask: 5375 kfree(phba->fcf.fcf_rr_bmask); 5376 out_remove_rpi_hdrs: 5377 lpfc_sli4_remove_rpi_hdrs(phba); 5378 out_free_active_sgl: 5379 lpfc_free_active_sgl(phba); 5380 out_destroy_cq_event_pool: 5381 lpfc_sli4_cq_event_pool_destroy(phba); 5382 out_free_bsmbx: 5383 lpfc_destroy_bootstrap_mbox(phba); 5384 out_free_mem: 5385 lpfc_mem_free(phba); 5386 return rc; 5387 } 5388 5389 /** 5390 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 5391 * @phba: pointer to lpfc hba data structure. 5392 * 5393 * This routine is invoked to unset the driver internal resources set up 5394 * specific for supporting the SLI-4 HBA device it attached to. 5395 **/ 5396 static void 5397 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 5398 { 5399 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 5400 5401 /* Free memory allocated for msi-x interrupt vector to CPU mapping */ 5402 kfree(phba->sli4_hba.cpu_map); 5403 phba->sli4_hba.num_present_cpu = 0; 5404 phba->sli4_hba.num_online_cpu = 0; 5405 phba->sli4_hba.curr_disp_cpu = 0; 5406 5407 /* Free memory allocated for msi-x interrupt vector entries */ 5408 kfree(phba->sli4_hba.msix_entries); 5409 5410 /* Free memory allocated for fast-path work queue handles */ 5411 kfree(phba->sli4_hba.fcp_eq_hdl); 5412 5413 /* Free the allocated rpi headers. */ 5414 lpfc_sli4_remove_rpi_hdrs(phba); 5415 lpfc_sli4_remove_rpis(phba); 5416 5417 /* Free eligible FCF index bmask */ 5418 kfree(phba->fcf.fcf_rr_bmask); 5419 5420 /* Free the ELS sgl list */ 5421 lpfc_free_active_sgl(phba); 5422 lpfc_free_els_sgl_list(phba); 5423 5424 /* Free the completion queue EQ event pool */ 5425 lpfc_sli4_cq_event_release_all(phba); 5426 lpfc_sli4_cq_event_pool_destroy(phba); 5427 5428 /* Release resource identifiers. */ 5429 lpfc_sli4_dealloc_resource_identifiers(phba); 5430 5431 /* Free the bsmbx region. */ 5432 lpfc_destroy_bootstrap_mbox(phba); 5433 5434 /* Free the SLI Layer memory with SLI4 HBAs */ 5435 lpfc_mem_free_all(phba); 5436 5437 /* Free the current connect table */ 5438 list_for_each_entry_safe(conn_entry, next_conn_entry, 5439 &phba->fcf_conn_rec_list, list) { 5440 list_del_init(&conn_entry->list); 5441 kfree(conn_entry); 5442 } 5443 5444 return; 5445 } 5446 5447 /** 5448 * lpfc_init_api_table_setup - Set up init api function jump table 5449 * @phba: The hba struct for which this call is being executed. 5450 * @dev_grp: The HBA PCI-Device group number. 5451 * 5452 * This routine sets up the device INIT interface API function jump table 5453 * in @phba struct. 5454 * 5455 * Returns: 0 - success, -ENODEV - failure. 
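*
* Hedged usage sketch (illustrative only, not part of the original source),
* assuming an SLI-3 device group; calls then dispatch through the table:
*
*	if (lpfc_init_api_table_setup(phba, LPFC_PCI_DEV_LP))
*		return -ENODEV;
*	phba->lpfc_stop_port(phba);	(dispatches to lpfc_stop_port_s3)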
5456 **/
5457 int
5458 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5459 {
5460 phba->lpfc_hba_init_link = lpfc_hba_init_link;
5461 phba->lpfc_hba_down_link = lpfc_hba_down_link;
5462 phba->lpfc_selective_reset = lpfc_selective_reset;
5463 switch (dev_grp) {
5464 case LPFC_PCI_DEV_LP:
5465 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
5466 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
5467 phba->lpfc_stop_port = lpfc_stop_port_s3;
5468 break;
5469 case LPFC_PCI_DEV_OC:
5470 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
5471 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
5472 phba->lpfc_stop_port = lpfc_stop_port_s4;
5473 break;
5474 default:
5475 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5476 "1431 Invalid HBA PCI-device group: 0x%x\n",
5477 dev_grp);
5478 return -ENODEV;
5479 break;
5480 }
5481 return 0;
5482 }
5483
5484 /**
5485 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
5486 * @phba: pointer to lpfc hba data structure.
5487 *
5488 * This routine is invoked to set up the driver internal resources before the
5489 * device specific resource setup to support the HBA device it is attached to.
5490 *
5491 * Return codes
5492 * 0 - successful
5493 * other values - error
5494 **/
5495 static int
5496 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
5497 {
5498 /*
5499 * Driver resources common to all SLI revisions
5500 */
5501 atomic_set(&phba->fast_event_count, 0);
5502 spin_lock_init(&phba->hbalock);
5503
5504 /* Initialize ndlp management spinlock */
5505 spin_lock_init(&phba->ndlp_lock);
5506
5507 INIT_LIST_HEAD(&phba->port_list);
5508 INIT_LIST_HEAD(&phba->work_list);
5509 init_waitqueue_head(&phba->wait_4_mlo_m_q);
5510
5511 /* Initialize the wait queue head for the kernel thread */
5512 init_waitqueue_head(&phba->work_waitq);
5513
5514 /* Initialize the scsi buffer list used by driver for scsi IO */
5515 spin_lock_init(&phba->scsi_buf_list_get_lock);
5516 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
5517 spin_lock_init(&phba->scsi_buf_list_put_lock);
5518 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
5519
5520 /* Initialize the fabric iocb list */
5521 INIT_LIST_HEAD(&phba->fabric_iocb_list);
5522
5523 /* Initialize list to save ELS buffers */
5524 INIT_LIST_HEAD(&phba->elsbuf);
5525
5526 /* Initialize FCF connection rec list */
5527 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
5528
5529 /* Initialize OAS configuration list */
5530 spin_lock_init(&phba->devicelock);
5531 INIT_LIST_HEAD(&phba->luns);
5532
5533 return 0;
5534 }
5535
5536 /**
5537 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
5538 * @phba: pointer to lpfc hba data structure.
5539 *
5540 * This routine is invoked to set up the driver internal resources after the
5541 * device specific resource setup to support the HBA device it is attached to.
5542 *
5543 * Return codes
5544 * 0 - successful
5545 * other values - error
5546 **/
5547 static int
5548 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
5549 {
5550 int error;
5551
5552 /* Startup the kernel thread for this host adapter. */
5553 phba->worker_thread = kthread_run(lpfc_do_work, phba,
5554 "lpfc_worker_%d", phba->brd_no);
5555 if (IS_ERR(phba->worker_thread)) {
5556 error = PTR_ERR(phba->worker_thread);
5557 return error;
5558 }
5559
5560 return 0;
5561 }
5562
5563 /**
5564 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
5565 * @phba: pointer to lpfc hba data structure.
5566 *
5567 * This routine is invoked to unset the driver internal resources set up after
5568 * the device specific resource setup for supporting the HBA device it is
5569 * attached to.
5570 **/
5571 static void
5572 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
5573 {
5574 /* Stop kernel worker thread */
5575 kthread_stop(phba->worker_thread);
5576 }
5577
5578 /**
5579 * lpfc_free_iocb_list - Free iocb list.
5580 * @phba: pointer to lpfc hba data structure.
5581 *
5582 * This routine is invoked to free the driver's IOCB list and memory.
5583 **/
5584 static void
5585 lpfc_free_iocb_list(struct lpfc_hba *phba)
5586 {
5587 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
5588
5589 spin_lock_irq(&phba->hbalock);
5590 list_for_each_entry_safe(iocbq_entry, iocbq_next,
5591 &phba->lpfc_iocb_list, list) {
5592 list_del(&iocbq_entry->list);
5593 kfree(iocbq_entry);
5594 phba->total_iocbq_bufs--;
5595 }
5596 spin_unlock_irq(&phba->hbalock);
5597
5598 return;
5599 }
5600
5601 /**
5602 * lpfc_init_iocb_list - Allocate and initialize iocb list.
5603 * @phba: pointer to lpfc hba data structure.
5604 *
5605 * This routine is invoked to allocate and initialize the driver's IOCB
5606 * list and set up the IOCB tag array accordingly.
5607 *
5608 * Return codes
5609 * 0 - successful
5610 * other values - error
5611 **/
5612 static int
5613 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
5614 {
5615 struct lpfc_iocbq *iocbq_entry = NULL;
5616 uint16_t iotag;
5617 int i;
5618
5619 /* Initialize and populate the iocb list per host. */
5620 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
5621 for (i = 0; i < iocb_count; i++) {
5622 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
5623 if (iocbq_entry == NULL) {
5624 printk(KERN_ERR "%s: only allocated %d iocbs of "
5625 "expected %d count. Unloading driver.\n",
5626 __func__, i, LPFC_IOCB_LIST_CNT);
5627 goto out_free_iocbq;
5628 }
5629
5630 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
5631 if (iotag == 0) {
5632 kfree(iocbq_entry);
5633 printk(KERN_ERR "%s: failed to allocate IOTAG. "
5634 "Unloading driver.\n", __func__);
5635 goto out_free_iocbq;
5636 }
5637 iocbq_entry->sli4_lxritag = NO_XRI;
5638 iocbq_entry->sli4_xritag = NO_XRI;
5639
5640 spin_lock_irq(&phba->hbalock);
5641 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
5642 phba->total_iocbq_bufs++;
5643 spin_unlock_irq(&phba->hbalock);
5644 }
5645
5646 return 0;
5647
5648 out_free_iocbq:
5649 lpfc_free_iocb_list(phba);
5650
5651 return -ENOMEM;
5652 }
5653
5654 /**
5655 * lpfc_free_sgl_list - Free a given sgl list.
5656 * @phba: pointer to lpfc hba data structure.
5657 * @sglq_list: pointer to the head of sgl list.
5658 *
5659 * This routine is invoked to free a given sgl list and memory.
5660 **/
5661 void
5662 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
5663 {
5664 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
5665
5666 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
5667 list_del(&sglq_entry->list);
5668 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
5669 kfree(sglq_entry);
5670 }
5671 }
5672
5673 /**
5674 * lpfc_free_els_sgl_list - Free els sgl list.
5675 * @phba: pointer to lpfc hba data structure.
5676 *
5677 * This routine is invoked to free the driver's els sgl list and memory.
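*
* Note: as the code below shows, the sgl entries are first spliced onto a
* local list under hbalock so the actual freeing can proceed without
* holding the lock.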
5678 **/
5679 static void
5680 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
5681 {
5682 LIST_HEAD(sglq_list);
5683
5684 /* Retrieve all els sgls from driver list */
5685 spin_lock_irq(&phba->hbalock);
5686 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
5687 spin_unlock_irq(&phba->hbalock);
5688
5689 /* Now free the sgl list */
5690 lpfc_free_sgl_list(phba, &sglq_list);
5691 }
5692
5693 /**
5694 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
5695 * @phba: pointer to lpfc hba data structure.
5696 *
5697 * This routine is invoked to allocate the driver's active sgl memory.
5698 * This array will hold the sglq_entry's for active IOs.
5699 **/
5700 static int
5701 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
5702 {
5703 int size;
5704 size = sizeof(struct lpfc_sglq *);
5705 size *= phba->sli4_hba.max_cfg_param.max_xri;
5706
5707 phba->sli4_hba.lpfc_sglq_active_list =
5708 kzalloc(size, GFP_KERNEL);
5709 if (!phba->sli4_hba.lpfc_sglq_active_list)
5710 return -ENOMEM;
5711 return 0;
5712 }
5713
5714 /**
5715 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
5716 * @phba: pointer to lpfc hba data structure.
5717 *
5718 * This routine is invoked to walk through the array of active sglq entries
5719 * and free all of the resources.
5720 * This is just a placeholder for now.
5721 **/
5722 static void
5723 lpfc_free_active_sgl(struct lpfc_hba *phba)
5724 {
5725 kfree(phba->sli4_hba.lpfc_sglq_active_list);
5726 }
5727
5728 /**
5729 * lpfc_init_sgl_list - Allocate and initialize sgl list.
5730 * @phba: pointer to lpfc hba data structure.
5731 *
5732 * This routine is invoked to allocate and initialize the driver's sgl
5733 * list and set up the sgl xritag tag array accordingly.
5734 *
5735 **/
5736 static void
5737 lpfc_init_sgl_list(struct lpfc_hba *phba)
5738 {
5739 /* Initialize and populate the sglq list per host/VF. */
5740 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
5741 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
5742
5743 /* els xri-sgl book keeping */
5744 phba->sli4_hba.els_xri_cnt = 0;
5745
5746 /* scsi xri-buffer book keeping */
5747 phba->sli4_hba.scsi_xri_cnt = 0;
5748 }
5749
5750 /**
5751 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
5752 * @phba: pointer to lpfc hba data structure.
5753 *
5754 * This routine is invoked to post rpi header templates to the
5755 * port for those SLI4 ports that do not support extents. This routine
5756 * posts a PAGE_SIZE memory region to the port to hold up to
5757 * PAGE_SIZE / 64 rpi context headers. This is an initialization routine
5758 * and should be called only when interrupts are disabled.
5759 *
5760 * Return codes
5761 * 0 - successful
5762 * -ERROR - otherwise.
5763 **/
5764 int
5765 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
5766 {
5767 int rc = 0;
5768 struct lpfc_rpi_hdr *rpi_hdr;
5769
5770 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
5771 if (!phba->sli4_hba.rpi_hdrs_in_use)
5772 return rc;
5773 if (phba->sli4_hba.extents_in_use)
5774 return -EIO;
5775
5776 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
5777 if (!rpi_hdr) {
5778 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5779 "0391 Error during rpi post operation\n");
5780 lpfc_sli4_remove_rpis(phba);
5781 rc = -ENODEV;
5782 }
5783
5784 return rc;
5785 }
5786
5787 /**
5788 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
5789 * @phba: pointer to lpfc hba data structure.
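*
* Sizing note (an inference, not stated in the original source): with
* LPFC_RPI_HDR_COUNT rpis per header block and a 4KB region, each rpi
* context header is assumed to occupy 64 bytes (4096 / 64 == 64 rpis).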
5790 * 5791 * This routine is invoked to allocate a single 4KB memory region to 5792 * support rpis and stores them in the phba. This single region 5793 * provides support for up to 64 rpis. The region is used globally 5794 * by the device. 5795 * 5796 * Returns: 5797 * A valid rpi hdr on success. 5798 * A NULL pointer on any failure. 5799 **/ 5800 struct lpfc_rpi_hdr * 5801 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 5802 { 5803 uint16_t rpi_limit, curr_rpi_range; 5804 struct lpfc_dmabuf *dmabuf; 5805 struct lpfc_rpi_hdr *rpi_hdr; 5806 uint32_t rpi_count; 5807 5808 /* 5809 * If the SLI4 port supports extents, posting the rpi header isn't 5810 * required. Set the expected maximum count and let the actual value 5811 * get set when extents are fully allocated. 5812 */ 5813 if (!phba->sli4_hba.rpi_hdrs_in_use) 5814 return NULL; 5815 if (phba->sli4_hba.extents_in_use) 5816 return NULL; 5817 5818 /* The limit on the logical index is just the max_rpi count. */ 5819 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + 5820 phba->sli4_hba.max_cfg_param.max_rpi - 1; 5821 5822 spin_lock_irq(&phba->hbalock); 5823 /* 5824 * Establish the starting RPI in this header block. The starting 5825 * rpi is normalized to a zero base because the physical rpi is 5826 * port based. 5827 */ 5828 curr_rpi_range = phba->sli4_hba.next_rpi; 5829 spin_unlock_irq(&phba->hbalock); 5830 5831 /* 5832 * The port has a limited number of rpis. The increment here 5833 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value 5834 * and to allow the full max_rpi range per port. 5835 */ 5836 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit) 5837 rpi_count = rpi_limit - curr_rpi_range; 5838 else 5839 rpi_count = LPFC_RPI_HDR_COUNT; 5840 5841 if (!rpi_count) 5842 return NULL; 5843 /* 5844 * First allocate the protocol header region for the port. The 5845 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 5846 */ 5847 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5848 if (!dmabuf) 5849 return NULL; 5850 5851 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 5852 LPFC_HDR_TEMPLATE_SIZE, 5853 &dmabuf->phys, 5854 GFP_KERNEL); 5855 if (!dmabuf->virt) { 5856 rpi_hdr = NULL; 5857 goto err_free_dmabuf; 5858 } 5859 5860 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE); 5861 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 5862 rpi_hdr = NULL; 5863 goto err_free_coherent; 5864 } 5865 5866 /* Save the rpi header data for cleanup later. */ 5867 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 5868 if (!rpi_hdr) 5869 goto err_free_coherent; 5870 5871 rpi_hdr->dmabuf = dmabuf; 5872 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 5873 rpi_hdr->page_count = 1; 5874 spin_lock_irq(&phba->hbalock); 5875 5876 /* The rpi_hdr stores the logical index only. */ 5877 rpi_hdr->start_rpi = curr_rpi_range; 5878 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 5879 5880 /* 5881 * The next_rpi stores the next logical module-64 rpi value used 5882 * to post physical rpis in subsequent rpi postings. 5883 */ 5884 phba->sli4_hba.next_rpi += rpi_count; 5885 spin_unlock_irq(&phba->hbalock); 5886 return rpi_hdr; 5887 5888 err_free_coherent: 5889 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 5890 dmabuf->virt, dmabuf->phys); 5891 err_free_dmabuf: 5892 kfree(dmabuf); 5893 return NULL; 5894 } 5895 5896 /** 5897 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 5898 * @phba: pointer to lpfc hba data structure. 
5899 * 5900 * This routine is invoked to remove all memory resources allocated 5901 * to support rpis for SLI4 ports not supporting extents. This routine 5902 * presumes the caller has released all rpis consumed by fabric or port 5903 * logins and is prepared to have the header pages removed. 5904 **/ 5905 void 5906 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 5907 { 5908 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 5909 5910 if (!phba->sli4_hba.rpi_hdrs_in_use) 5911 goto exit; 5912 5913 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 5914 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 5915 list_del(&rpi_hdr->list); 5916 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 5917 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 5918 kfree(rpi_hdr->dmabuf); 5919 kfree(rpi_hdr); 5920 } 5921 exit: 5922 /* There are no rpis available to the port now. */ 5923 phba->sli4_hba.next_rpi = 0; 5924 } 5925 5926 /** 5927 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 5928 * @pdev: pointer to pci device data structure. 5929 * 5930 * This routine is invoked to allocate the driver hba data structure for an 5931 * HBA device. If the allocation is successful, the phba reference to the 5932 * PCI device data structure is set. 5933 * 5934 * Return codes 5935 * pointer to @phba - successful 5936 * NULL - error 5937 **/ 5938 static struct lpfc_hba * 5939 lpfc_hba_alloc(struct pci_dev *pdev) 5940 { 5941 struct lpfc_hba *phba; 5942 5943 /* Allocate memory for HBA structure */ 5944 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 5945 if (!phba) { 5946 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 5947 return NULL; 5948 } 5949 5950 /* Set reference to PCI device in HBA structure */ 5951 phba->pcidev = pdev; 5952 5953 /* Assign an unused board number */ 5954 phba->brd_no = lpfc_get_instance(); 5955 if (phba->brd_no < 0) { 5956 kfree(phba); 5957 return NULL; 5958 } 5959 5960 spin_lock_init(&phba->ct_ev_lock); 5961 INIT_LIST_HEAD(&phba->ct_ev_waiters); 5962 5963 return phba; 5964 } 5965 5966 /** 5967 * lpfc_hba_free - Free driver hba data structure with a device. 5968 * @phba: pointer to lpfc hba data structure. 5969 * 5970 * This routine is invoked to free the driver hba data structure with an 5971 * HBA device. 5972 **/ 5973 static void 5974 lpfc_hba_free(struct lpfc_hba *phba) 5975 { 5976 /* Release the driver assigned board number */ 5977 idr_remove(&lpfc_hba_index, phba->brd_no); 5978 5979 /* Free memory allocated with sli rings */ 5980 kfree(phba->sli.ring); 5981 phba->sli.ring = NULL; 5982 5983 kfree(phba); 5984 return; 5985 } 5986 5987 /** 5988 * lpfc_create_shost - Create hba physical port with associated scsi host. 5989 * @phba: pointer to lpfc hba data structure. 5990 * 5991 * This routine is invoked to create HBA physical port and associate a SCSI 5992 * host with it. 
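*
* The vport created here becomes the physical port (phba->pport), and a
* reference to its SCSI host is stored as the PCI driver private data.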
5993 * 5994 * Return codes 5995 * 0 - successful 5996 * other values - error 5997 **/ 5998 static int 5999 lpfc_create_shost(struct lpfc_hba *phba) 6000 { 6001 struct lpfc_vport *vport; 6002 struct Scsi_Host *shost; 6003 6004 /* Initialize HBA FC structure */ 6005 phba->fc_edtov = FF_DEF_EDTOV; 6006 phba->fc_ratov = FF_DEF_RATOV; 6007 phba->fc_altov = FF_DEF_ALTOV; 6008 phba->fc_arbtov = FF_DEF_ARBTOV; 6009 6010 atomic_set(&phba->sdev_cnt, 0); 6011 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 6012 if (!vport) 6013 return -ENODEV; 6014 6015 shost = lpfc_shost_from_vport(vport); 6016 phba->pport = vport; 6017 lpfc_debugfs_initialize(vport); 6018 /* Put reference to SCSI host to driver's device private data */ 6019 pci_set_drvdata(phba->pcidev, shost); 6020 6021 return 0; 6022 } 6023 6024 /** 6025 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 6026 * @phba: pointer to lpfc hba data structure. 6027 * 6028 * This routine is invoked to destroy HBA physical port and the associated 6029 * SCSI host. 6030 **/ 6031 static void 6032 lpfc_destroy_shost(struct lpfc_hba *phba) 6033 { 6034 struct lpfc_vport *vport = phba->pport; 6035 6036 /* Destroy physical port that associated with the SCSI host */ 6037 destroy_port(vport); 6038 6039 return; 6040 } 6041 6042 /** 6043 * lpfc_setup_bg - Setup Block guard structures and debug areas. 6044 * @phba: pointer to lpfc hba data structure. 6045 * @shost: the shost to be used to detect Block guard settings. 6046 * 6047 * This routine sets up the local Block guard protocol settings for @shost. 6048 * This routine also allocates memory for debugging bg buffers. 6049 **/ 6050 static void 6051 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 6052 { 6053 uint32_t old_mask; 6054 uint32_t old_guard; 6055 6056 int pagecnt = 10; 6057 if (lpfc_prot_mask && lpfc_prot_guard) { 6058 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6059 "1478 Registering BlockGuard with the " 6060 "SCSI layer\n"); 6061 6062 old_mask = lpfc_prot_mask; 6063 old_guard = lpfc_prot_guard; 6064 6065 /* Only allow supported values */ 6066 lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | 6067 SHOST_DIX_TYPE0_PROTECTION | 6068 SHOST_DIX_TYPE1_PROTECTION); 6069 lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC); 6070 6071 /* DIF Type 1 protection for profiles AST1/C1 is end to end */ 6072 if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION) 6073 lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; 6074 6075 if (lpfc_prot_mask && lpfc_prot_guard) { 6076 if ((old_mask != lpfc_prot_mask) || 6077 (old_guard != lpfc_prot_guard)) 6078 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6079 "1475 Registering BlockGuard with the " 6080 "SCSI layer: mask %d guard %d\n", 6081 lpfc_prot_mask, lpfc_prot_guard); 6082 6083 scsi_host_set_prot(shost, lpfc_prot_mask); 6084 scsi_host_set_guard(shost, lpfc_prot_guard); 6085 } else 6086 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6087 "1479 Not Registering BlockGuard with the SCSI " 6088 "layer, Bad protection parameters: %d %d\n", 6089 old_mask, old_guard); 6090 } 6091 6092 if (!_dump_buf_data) { 6093 while (pagecnt) { 6094 spin_lock_init(&_dump_buf_lock); 6095 _dump_buf_data = 6096 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 6097 if (_dump_buf_data) { 6098 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6099 "9043 BLKGRD: allocated %d pages for " 6100 "_dump_buf_data at 0x%p\n", 6101 (1 << pagecnt), _dump_buf_data); 6102 _dump_buf_data_order = pagecnt; 6103 memset(_dump_buf_data, 0, 6104 ((1 << PAGE_SHIFT) << pagecnt)); 6105 
break; 6106 } else 6107 --pagecnt; 6108 } 6109 if (!_dump_buf_data_order) 6110 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6111 "9044 BLKGRD: ERROR unable to allocate " 6112 "memory for hexdump\n"); 6113 } else 6114 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6115 "9045 BLKGRD: already allocated _dump_buf_data=0x%p" 6116 "\n", _dump_buf_data); 6117 if (!_dump_buf_dif) { 6118 while (pagecnt) { 6119 _dump_buf_dif = 6120 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 6121 if (_dump_buf_dif) { 6122 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6123 "9046 BLKGRD: allocated %d pages for " 6124 "_dump_buf_dif at 0x%p\n", 6125 (1 << pagecnt), _dump_buf_dif); 6126 _dump_buf_dif_order = pagecnt; 6127 memset(_dump_buf_dif, 0, 6128 ((1 << PAGE_SHIFT) << pagecnt)); 6129 break; 6130 } else 6131 --pagecnt; 6132 } 6133 if (!_dump_buf_dif_order) 6134 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6135 "9047 BLKGRD: ERROR unable to allocate " 6136 "memory for hexdump\n"); 6137 } else 6138 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6139 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", 6140 _dump_buf_dif); 6141 } 6142 6143 /** 6144 * lpfc_post_init_setup - Perform necessary device post initialization setup. 6145 * @phba: pointer to lpfc hba data structure. 6146 * 6147 * This routine is invoked to perform all the necessary post initialization 6148 * setup for the device. 6149 **/ 6150 static void 6151 lpfc_post_init_setup(struct lpfc_hba *phba) 6152 { 6153 struct Scsi_Host *shost; 6154 struct lpfc_adapter_event_header adapter_event; 6155 6156 /* Get the default values for Model Name and Description */ 6157 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 6158 6159 /* 6160 * hba setup may have changed the hba_queue_depth so we need to 6161 * adjust the value of can_queue. 6162 */ 6163 shost = pci_get_drvdata(phba->pcidev); 6164 shost->can_queue = phba->cfg_hba_queue_depth - 10; 6165 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 6166 lpfc_setup_bg(phba, shost); 6167 6168 lpfc_host_attrib_init(shost); 6169 6170 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 6171 spin_lock_irq(shost->host_lock); 6172 lpfc_poll_start_timer(phba); 6173 spin_unlock_irq(shost->host_lock); 6174 } 6175 6176 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6177 "0428 Perform SCSI scan\n"); 6178 /* Send board arrival event to upper layer */ 6179 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 6180 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 6181 fc_host_post_vendor_event(shost, fc_get_event_number(), 6182 sizeof(adapter_event), 6183 (char *) &adapter_event, 6184 LPFC_NL_VENDOR_ID); 6185 return; 6186 } 6187 6188 /** 6189 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 6190 * @phba: pointer to lpfc hba data structure. 6191 * 6192 * This routine is invoked to set up the PCI device memory space for device 6193 * with SLI-3 interface spec. 
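*
* BAR0 is mapped for the HBA SLIM and BAR2 for the HBA control registers;
* a 64-bit DMA mask is attempted first, with a fallback to 32-bit.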
6194 * 6195 * Return codes 6196 * 0 - successful 6197 * other values - error 6198 **/ 6199 static int 6200 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 6201 { 6202 struct pci_dev *pdev; 6203 unsigned long bar0map_len, bar2map_len; 6204 int i, hbq_count; 6205 void *ptr; 6206 int error = -ENODEV; 6207 6208 /* Obtain PCI device reference */ 6209 if (!phba->pcidev) 6210 return error; 6211 else 6212 pdev = phba->pcidev; 6213 6214 /* Set the device DMA mask size */ 6215 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 6216 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 6217 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 6218 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 6219 return error; 6220 } 6221 } 6222 6223 /* Get the bus address of Bar0 and Bar2 and the number of bytes 6224 * required by each mapping. 6225 */ 6226 phba->pci_bar0_map = pci_resource_start(pdev, 0); 6227 bar0map_len = pci_resource_len(pdev, 0); 6228 6229 phba->pci_bar2_map = pci_resource_start(pdev, 2); 6230 bar2map_len = pci_resource_len(pdev, 2); 6231 6232 /* Map HBA SLIM to a kernel virtual address. */ 6233 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 6234 if (!phba->slim_memmap_p) { 6235 dev_printk(KERN_ERR, &pdev->dev, 6236 "ioremap failed for SLIM memory.\n"); 6237 goto out; 6238 } 6239 6240 /* Map HBA Control Registers to a kernel virtual address. */ 6241 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 6242 if (!phba->ctrl_regs_memmap_p) { 6243 dev_printk(KERN_ERR, &pdev->dev, 6244 "ioremap failed for HBA control registers.\n"); 6245 goto out_iounmap_slim; 6246 } 6247 6248 /* Allocate memory for SLI-2 structures */ 6249 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, 6250 SLI2_SLIM_SIZE, 6251 &phba->slim2p.phys, 6252 GFP_KERNEL); 6253 if (!phba->slim2p.virt) 6254 goto out_iounmap; 6255 6256 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); 6257 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 6258 phba->mbox_ext = (phba->slim2p.virt + 6259 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 6260 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 6261 phba->IOCBs = (phba->slim2p.virt + 6262 offsetof(struct lpfc_sli2_slim, IOCBs)); 6263 6264 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 6265 lpfc_sli_hbq_size(), 6266 &phba->hbqslimp.phys, 6267 GFP_KERNEL); 6268 if (!phba->hbqslimp.virt) 6269 goto out_free_slim; 6270 6271 hbq_count = lpfc_sli_hbq_count(); 6272 ptr = phba->hbqslimp.virt; 6273 for (i = 0; i < hbq_count; ++i) { 6274 phba->hbqs[i].hbq_virt = ptr; 6275 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 6276 ptr += (lpfc_hbq_defs[i]->entry_count * 6277 sizeof(struct lpfc_hbq_entry)); 6278 } 6279 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 6280 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 6281 6282 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 6283 6284 INIT_LIST_HEAD(&phba->rb_pend_list); 6285 6286 phba->MBslimaddr = phba->slim_memmap_p; 6287 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 6288 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 6289 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 6290 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 6291 6292 return 0; 6293 6294 out_free_slim: 6295 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 6296 phba->slim2p.virt, phba->slim2p.phys); 6297 out_iounmap: 6298 iounmap(phba->ctrl_regs_memmap_p); 6299 out_iounmap_slim: 6300 iounmap(phba->slim_memmap_p); 6301 
out:
6302 return error;
6303 }
6304
6305 /**
6306 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
6307 * @phba: pointer to lpfc hba data structure.
6308 *
6309 * This routine is invoked to unset the PCI device memory space for device
6310 * with SLI-3 interface spec.
6311 **/
6312 static void
6313 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
6314 {
6315 struct pci_dev *pdev;
6316
6317 /* Obtain PCI device reference */
6318 if (!phba->pcidev)
6319 return;
6320 else
6321 pdev = phba->pcidev;
6322
6323 /* Free coherent DMA memory allocated */
6324 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
6325 phba->hbqslimp.virt, phba->hbqslimp.phys);
6326 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6327 phba->slim2p.virt, phba->slim2p.phys);
6328
6329 /* I/O memory unmap */
6330 iounmap(phba->ctrl_regs_memmap_p);
6331 iounmap(phba->slim_memmap_p);
6332
6333 return;
6334 }
6335
6336 /**
6337 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
6338 * @phba: pointer to lpfc hba data structure.
6339 *
6340 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
6341 * done and check status.
6342 *
6343 * Return 0 if successful, otherwise -ENODEV.
6344 **/
6345 int
6346 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
6347 {
6348 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
6349 struct lpfc_register reg_data;
6350 int i, port_error = 0;
6351 uint32_t if_type;
6352
6353 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
6354 memset(&reg_data, 0, sizeof(reg_data));
6355 if (!phba->sli4_hba.PSMPHRregaddr)
6356 return -ENODEV;
6357
6358 /* Wait up to 30 seconds for the SLI Port POST done and ready */
6359 for (i = 0; i < 3000; i++) {
6360 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
6361 &portsmphr_reg.word0) ||
6362 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
6363 /* Port has a fatal POST error, break out */
6364 port_error = -ENODEV;
6365 break;
6366 }
6367 if (LPFC_POST_STAGE_PORT_READY ==
6368 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
6369 break;
6370 msleep(10);
6371 }
6372
6373 /*
6374 * If there was a port error during POST, then don't proceed with
6375 * other register reads as the data may not be valid. Just exit.
6376 */
6377 if (port_error) {
6378 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6379 "1408 Port Failed POST - portsmphr=0x%x, "
6380 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
6381 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
6382 portsmphr_reg.word0,
6383 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
6384 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
6385 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
6386 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
6387 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
6388 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
6389 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
6390 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
6391 } else {
6392 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6393 "2534 Device Info: SLIFamily=0x%x, "
6394 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
6395 "SLIHint_2=0x%x, FT=0x%x\n",
6396 bf_get(lpfc_sli_intf_sli_family,
6397 &phba->sli4_hba.sli_intf),
6398 bf_get(lpfc_sli_intf_slirev,
6399 &phba->sli4_hba.sli_intf),
6400 bf_get(lpfc_sli_intf_if_type,
6401 &phba->sli4_hba.sli_intf),
6402 bf_get(lpfc_sli_intf_sli_hint1,
6403 &phba->sli4_hba.sli_intf),
6404 bf_get(lpfc_sli_intf_sli_hint2,
6405 &phba->sli4_hba.sli_intf),
6406 bf_get(lpfc_sli_intf_func_type,
6407 &phba->sli4_hba.sli_intf));
6408 /*
6409 * Check for other Port errors during the initialization
6410 * process. Fail the load if the port did not come up
6411 * correctly.
6412 */
6413 if_type = bf_get(lpfc_sli_intf_if_type,
6414 &phba->sli4_hba.sli_intf);
6415 switch (if_type) {
6416 case LPFC_SLI_INTF_IF_TYPE_0:
6417 phba->sli4_hba.ue_mask_lo =
6418 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
6419 phba->sli4_hba.ue_mask_hi =
6420 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
6421 uerrlo_reg.word0 =
6422 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
6423 uerrhi_reg.word0 =
6424 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
6425 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
6426 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
6427 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6428 "1422 Unrecoverable Error "
6429 "Detected during POST "
6430 "uerr_lo_reg=0x%x, "
6431 "uerr_hi_reg=0x%x, "
6432 "ue_mask_lo_reg=0x%x, "
6433 "ue_mask_hi_reg=0x%x\n",
6434 uerrlo_reg.word0,
6435 uerrhi_reg.word0,
6436 phba->sli4_hba.ue_mask_lo,
6437 phba->sli4_hba.ue_mask_hi);
6438 port_error = -ENODEV;
6439 }
6440 break;
6441 case LPFC_SLI_INTF_IF_TYPE_2:
6442 /* Final checks. The port status should be clean. */
6443 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
6444 &reg_data.word0) ||
6445 (bf_get(lpfc_sliport_status_err, &reg_data) &&
6446 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
6447 phba->work_status[0] =
6448 readl(phba->sli4_hba.u.if_type2.
6449 ERR1regaddr);
6450 phba->work_status[1] =
6451 readl(phba->sli4_hba.u.if_type2.
6452 ERR2regaddr);
6453 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6454 "2888 Unrecoverable port error "
6455 "following POST: port status reg "
6456 "0x%x, port_smphr reg 0x%x, "
6457 "error 1=0x%x, error 2=0x%x\n",
6458 reg_data.word0,
6459 portsmphr_reg.word0,
6460 phba->work_status[0],
6461 phba->work_status[1]);
6462 port_error = -ENODEV;
6463 }
6464 break;
6465 case LPFC_SLI_INTF_IF_TYPE_1:
6466 default:
6467 break;
6468 }
6469 }
6470 return port_error;
6471 }
6472
6473 /**
6474 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
6475 * @phba: pointer to lpfc hba data structure.
6476 * @if_type: The SLI4 interface type getting configured.
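*
* The offsets registered differ by interface type: if_type 0 exposes the
* unrecoverable-error mask/status registers, while if_type 2 exposes the
* port control, status, semaphore, and doorbell registers.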
6477 *
6478 * This routine is invoked to set up SLI4 BAR0 PCI config space register
6479 * memory map.
6480 **/
6481 static void
6482 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
6483 {
6484 switch (if_type) {
6485 case LPFC_SLI_INTF_IF_TYPE_0:
6486 phba->sli4_hba.u.if_type0.UERRLOregaddr =
6487 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
6488 phba->sli4_hba.u.if_type0.UERRHIregaddr =
6489 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
6490 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
6491 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
6492 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
6493 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
6494 phba->sli4_hba.SLIINTFregaddr =
6495 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
6496 break;
6497 case LPFC_SLI_INTF_IF_TYPE_2:
6498 phba->sli4_hba.u.if_type2.ERR1regaddr =
6499 phba->sli4_hba.conf_regs_memmap_p +
6500 LPFC_CTL_PORT_ER1_OFFSET;
6501 phba->sli4_hba.u.if_type2.ERR2regaddr =
6502 phba->sli4_hba.conf_regs_memmap_p +
6503 LPFC_CTL_PORT_ER2_OFFSET;
6504 phba->sli4_hba.u.if_type2.CTRLregaddr =
6505 phba->sli4_hba.conf_regs_memmap_p +
6506 LPFC_CTL_PORT_CTL_OFFSET;
6507 phba->sli4_hba.u.if_type2.STATUSregaddr =
6508 phba->sli4_hba.conf_regs_memmap_p +
6509 LPFC_CTL_PORT_STA_OFFSET;
6510 phba->sli4_hba.SLIINTFregaddr =
6511 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
6512 phba->sli4_hba.PSMPHRregaddr =
6513 phba->sli4_hba.conf_regs_memmap_p +
6514 LPFC_CTL_PORT_SEM_OFFSET;
6515 phba->sli4_hba.RQDBregaddr =
6516 phba->sli4_hba.conf_regs_memmap_p +
6517 LPFC_ULP0_RQ_DOORBELL;
6518 phba->sli4_hba.WQDBregaddr =
6519 phba->sli4_hba.conf_regs_memmap_p +
6520 LPFC_ULP0_WQ_DOORBELL;
6521 phba->sli4_hba.EQCQDBregaddr =
6522 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
6523 phba->sli4_hba.MQDBregaddr =
6524 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
6525 phba->sli4_hba.BMBXregaddr =
6526 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
6527 break;
6528 case LPFC_SLI_INTF_IF_TYPE_1:
6529 default:
6530 dev_printk(KERN_ERR, &phba->pcidev->dev,
6531 "FATAL - unsupported SLI4 interface type - %d\n",
6532 if_type);
6533 break;
6534 }
6535 }
6536
6537 /**
6538 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
6539 * @phba: pointer to lpfc hba data structure.
6540 *
6541 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
6542 * memory map.
6543 **/
6544 static void
6545 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
6546 {
6547 phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6548 LPFC_SLIPORT_IF0_SMPHR;
6549 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6550 LPFC_HST_ISR0;
6551 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6552 LPFC_HST_IMR0;
6553 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6554 LPFC_HST_ISCR0;
6555 }
6556
6557 /**
6558 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
6559 * @phba: pointer to lpfc hba data structure.
6560 * @vf: virtual function number
6561 *
6562 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
6563 * based on the given virtual function number, @vf.
6564 *
6565 * Return 0 if successful, otherwise -ENODEV.
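*
* Illustrative sketch (not from the original source): each virtual
* function owns one doorbell page, so for vf == 2 the RQ doorbell is
*
*	drbl_regs_memmap_p + 2 * LPFC_VFR_PAGE_SIZE + LPFC_ULP0_RQ_DOORBELL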
6566 **/
6567 static int
6568 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
6569 {
6570 if (vf > LPFC_VIR_FUNC_MAX)
6571 return -ENODEV;
6572
6573 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6574 vf * LPFC_VFR_PAGE_SIZE +
6575 LPFC_ULP0_RQ_DOORBELL);
6576 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6577 vf * LPFC_VFR_PAGE_SIZE +
6578 LPFC_ULP0_WQ_DOORBELL);
6579 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6580 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
6581 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6582 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
6583 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6584 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
6585 return 0;
6586 }
6587
6588 /**
6589 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
6590 * @phba: pointer to lpfc hba data structure.
6591 *
6592 * This routine is invoked to create the bootstrap mailbox
6593 * region consistent with the SLI-4 interface spec. This
6594 * routine allocates all memory necessary to communicate
6595 * mailbox commands to the port and sets up all alignment
6596 * needs. No locks are expected to be held when calling
6597 * this routine.
6598 *
6599 * Return codes
6600 * 0 - successful
6601 * -ENOMEM - could not allocate memory.
6602 **/
6603 static int
6604 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
6605 {
6606 uint32_t bmbx_size;
6607 struct lpfc_dmabuf *dmabuf;
6608 struct dma_address *dma_address;
6609 uint32_t pa_addr;
6610 uint64_t phys_addr;
6611
6612 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
6613 if (!dmabuf)
6614 return -ENOMEM;
6615
6616 /*
6617 * The bootstrap mailbox region is composed of two parts
6618 * plus an alignment restriction of 16 bytes.
6619 */
6620 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
6621 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6622 bmbx_size,
6623 &dmabuf->phys,
6624 GFP_KERNEL);
6625 if (!dmabuf->virt) {
6626 kfree(dmabuf);
6627 return -ENOMEM;
6628 }
6629 memset(dmabuf->virt, 0, bmbx_size);
6630
6631 /*
6632 * Initialize the bootstrap mailbox pointers now so that the register
6633 * operations are simple later. The mailbox dma address is required
6634 * to be 16-byte aligned. Also align the virtual memory as each
6635 * mailbox is copied into the bmbx mailbox region before issuing the
6636 * command to the port.
6637 */
6638 phba->sli4_hba.bmbx.dmabuf = dmabuf;
6639 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
6640
6641 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
6642 LPFC_ALIGN_16_BYTE);
6643 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
6644 LPFC_ALIGN_16_BYTE);
6645
6646 /*
6647 * Set the high and low physical addresses now. The SLI4 alignment
6648 * requirement is 16 bytes and the mailbox is posted to the port
6649 * as two 30-bit addresses. The other data is a bit marking whether
6650 * the 30-bit address is the high or low address.
6651 * Upcast bmbx aphys to 64bits so shift instruction compiles
6652 * clean on 32 bit machines.
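*
* Worked example of the split performed below (a sketch): for an aligned
* physical address P,
*	addr_hi = (((P >> 34) & 0x3fffffff) << 2) | LPFC_BMBX_BIT1_ADDR_HI
*	addr_lo = (((P >> 4) & 0x3fffffff) << 2) | LPFC_BMBX_BIT1_ADDR_LO
* so each word carries 30 address bits plus a marker bit that tells the
* port whether it holds the high or low half.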
6653 */
6654 dma_address = &phba->sli4_hba.bmbx.dma_address;
6655 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
6656 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
6657 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
6658 LPFC_BMBX_BIT1_ADDR_HI);
6659
6660 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
6661 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
6662 LPFC_BMBX_BIT1_ADDR_LO);
6663 return 0;
6664 }
6665
6666 /**
6667 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
6668 * @phba: pointer to lpfc hba data structure.
6669 *
6670 * This routine is invoked to tear down the bootstrap mailbox
6671 * region and release all host resources. This routine requires
6672 * the caller to ensure all mailbox commands have been recovered, no
6673 * additional mailbox commands are sent, and interrupts are disabled
6674 * before calling this routine.
6675 *
6676 **/
6677 static void
6678 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
6679 {
6680 dma_free_coherent(&phba->pcidev->dev,
6681 phba->sli4_hba.bmbx.bmbx_size,
6682 phba->sli4_hba.bmbx.dmabuf->virt,
6683 phba->sli4_hba.bmbx.dmabuf->phys);
6684
6685 kfree(phba->sli4_hba.bmbx.dmabuf);
6686 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
6687 }
6688
6689 /**
6690 * lpfc_sli4_read_config - Get the config parameters.
6691 * @phba: pointer to lpfc hba data structure.
6692 *
6693 * This routine is invoked to read the configuration parameters from the HBA.
6694 * The configuration parameters are used to set the base and maximum values
6695 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
6696 * allocation for the port.
6697 *
6698 * Return codes
6699 * 0 - successful
6700 * -ENOMEM - No available memory
6701 * -EIO - The mailbox failed to complete successfully.
6702 **/ 6703 int 6704 lpfc_sli4_read_config(struct lpfc_hba *phba) 6705 { 6706 LPFC_MBOXQ_t *pmb; 6707 struct lpfc_mbx_read_config *rd_config; 6708 union lpfc_sli4_cfg_shdr *shdr; 6709 uint32_t shdr_status, shdr_add_status; 6710 struct lpfc_mbx_get_func_cfg *get_func_cfg; 6711 struct lpfc_rsrc_desc_fcfcoe *desc; 6712 char *pdesc_0; 6713 uint32_t desc_count; 6714 int length, i, rc = 0, rc2; 6715 6716 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6717 if (!pmb) { 6718 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6719 "2011 Unable to allocate memory for issuing " 6720 "SLI_CONFIG_SPECIAL mailbox command\n"); 6721 return -ENOMEM; 6722 } 6723 6724 lpfc_read_config(phba, pmb); 6725 6726 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 6727 if (rc != MBX_SUCCESS) { 6728 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6729 "2012 Mailbox failed , mbxCmd x%x " 6730 "READ_CONFIG, mbxStatus x%x\n", 6731 bf_get(lpfc_mqe_command, &pmb->u.mqe), 6732 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 6733 rc = -EIO; 6734 } else { 6735 rd_config = &pmb->u.mqe.un.rd_config; 6736 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) { 6737 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 6738 phba->sli4_hba.lnk_info.lnk_tp = 6739 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config); 6740 phba->sli4_hba.lnk_info.lnk_no = 6741 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config); 6742 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6743 "3081 lnk_type:%d, lnk_numb:%d\n", 6744 phba->sli4_hba.lnk_info.lnk_tp, 6745 phba->sli4_hba.lnk_info.lnk_no); 6746 } else 6747 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6748 "3082 Mailbox (x%x) returned ldv:x0\n", 6749 bf_get(lpfc_mqe_command, &pmb->u.mqe)); 6750 phba->sli4_hba.extents_in_use = 6751 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); 6752 phba->sli4_hba.max_cfg_param.max_xri = 6753 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 6754 phba->sli4_hba.max_cfg_param.xri_base = 6755 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 6756 phba->sli4_hba.max_cfg_param.max_vpi = 6757 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 6758 phba->sli4_hba.max_cfg_param.vpi_base = 6759 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 6760 phba->sli4_hba.max_cfg_param.max_rpi = 6761 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 6762 phba->sli4_hba.max_cfg_param.rpi_base = 6763 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 6764 phba->sli4_hba.max_cfg_param.max_vfi = 6765 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 6766 phba->sli4_hba.max_cfg_param.vfi_base = 6767 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 6768 phba->sli4_hba.max_cfg_param.max_fcfi = 6769 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 6770 phba->sli4_hba.max_cfg_param.max_eq = 6771 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 6772 phba->sli4_hba.max_cfg_param.max_rq = 6773 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 6774 phba->sli4_hba.max_cfg_param.max_wq = 6775 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 6776 phba->sli4_hba.max_cfg_param.max_cq = 6777 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 6778 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 6779 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 6780 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 6781 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 6782 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 6783 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 6784 phba->max_vports = phba->max_vpi; 6785 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6786 "2003 cfg params Extents? 
%d " 6787 "XRI(B:%d M:%d), " 6788 "VPI(B:%d M:%d) " 6789 "VFI(B:%d M:%d) " 6790 "RPI(B:%d M:%d) " 6791 "FCFI(Count:%d)\n", 6792 phba->sli4_hba.extents_in_use, 6793 phba->sli4_hba.max_cfg_param.xri_base, 6794 phba->sli4_hba.max_cfg_param.max_xri, 6795 phba->sli4_hba.max_cfg_param.vpi_base, 6796 phba->sli4_hba.max_cfg_param.max_vpi, 6797 phba->sli4_hba.max_cfg_param.vfi_base, 6798 phba->sli4_hba.max_cfg_param.max_vfi, 6799 phba->sli4_hba.max_cfg_param.rpi_base, 6800 phba->sli4_hba.max_cfg_param.max_rpi, 6801 phba->sli4_hba.max_cfg_param.max_fcfi); 6802 } 6803 6804 if (rc) 6805 goto read_cfg_out; 6806 6807 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 6808 length = phba->sli4_hba.max_cfg_param.max_xri - 6809 lpfc_sli4_get_els_iocb_cnt(phba); 6810 if (phba->cfg_hba_queue_depth > length) { 6811 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6812 "3361 HBA queue depth changed from %d to %d\n", 6813 phba->cfg_hba_queue_depth, length); 6814 phba->cfg_hba_queue_depth = length; 6815 } 6816 6817 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 6818 LPFC_SLI_INTF_IF_TYPE_2) 6819 goto read_cfg_out; 6820 6821 /* get the pf# and vf# for SLI4 if_type 2 port */ 6822 length = (sizeof(struct lpfc_mbx_get_func_cfg) - 6823 sizeof(struct lpfc_sli4_cfg_mhdr)); 6824 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, 6825 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 6826 length, LPFC_SLI4_MBX_EMBED); 6827 6828 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 6829 shdr = (union lpfc_sli4_cfg_shdr *) 6830 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 6831 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6832 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6833 if (rc2 || shdr_status || shdr_add_status) { 6834 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6835 "3026 Mailbox failed , mbxCmd x%x " 6836 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 6837 bf_get(lpfc_mqe_command, &pmb->u.mqe), 6838 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 6839 goto read_cfg_out; 6840 } 6841 6842 /* search for fc_fcoe resrouce descriptor */ 6843 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 6844 desc_count = get_func_cfg->func_cfg.rsrc_desc_count; 6845 6846 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; 6847 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; 6848 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); 6849 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) 6850 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; 6851 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) 6852 goto read_cfg_out; 6853 6854 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 6855 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); 6856 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 6857 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { 6858 phba->sli4_hba.iov.pf_number = 6859 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 6860 phba->sli4_hba.iov.vf_number = 6861 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); 6862 break; 6863 } 6864 } 6865 6866 if (i < LPFC_RSRC_DESC_MAX_NUM) 6867 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6868 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 6869 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 6870 phba->sli4_hba.iov.vf_number); 6871 else 6872 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6873 "3028 GET_FUNCTION_CONFIG: failed to find " 6874 "Resrouce Descriptor:x%x\n", 6875 LPFC_RSRC_DESC_TYPE_FCFCOE); 6876 6877 read_cfg_out: 6878 mempool_free(pmb, phba->mbox_mem_pool); 6879 return rc; 6880 } 6881 6882 /** 6883 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 
6884 * @phba: pointer to lpfc hba data structure.
6885 *
6886 * This routine is invoked to set up the port-side endian order when
6887 * the port if_type is 0. This routine has no function for other
6888 * if_types.
6889 *
6890 * Return codes
6891 * 0 - successful
6892 * -ENOMEM - No available memory
6893 * -EIO - The mailbox failed to complete successfully.
6894 **/
6895 static int
6896 lpfc_setup_endian_order(struct lpfc_hba *phba)
6897 {
6898 LPFC_MBOXQ_t *mboxq;
6899 uint32_t if_type, rc = 0;
6900 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
6901 HOST_ENDIAN_HIGH_WORD1};
6902
6903 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
6904 switch (if_type) {
6905 case LPFC_SLI_INTF_IF_TYPE_0:
6906 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6907 GFP_KERNEL);
6908 if (!mboxq) {
6909 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6910 "0492 Unable to allocate memory for "
6911 "issuing SLI_CONFIG_SPECIAL mailbox "
6912 "command\n");
6913 return -ENOMEM;
6914 }
6915
6916 /*
6917 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
6918 * two words to contain special data values and no other data.
6919 */
6920 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
6921 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
6922 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6923 if (rc != MBX_SUCCESS) {
6924 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6925 "0493 SLI_CONFIG_SPECIAL mailbox "
6926 "failed with status x%x\n",
6927 rc);
6928 rc = -EIO;
6929 }
6930 mempool_free(mboxq, phba->mbox_mem_pool);
6931 break;
6932 case LPFC_SLI_INTF_IF_TYPE_2:
6933 case LPFC_SLI_INTF_IF_TYPE_1:
6934 default:
6935 break;
6936 }
6937 return rc;
6938 }
6939
6940 /**
6941 * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts
6942 * @phba: pointer to lpfc hba data structure.
6943 *
6944 * This routine is invoked to check the user-settable queue counts for EQs and
6945 * CQs. After this routine is called, the counts will be set to valid values
6946 * that adhere to the constraints of the system's interrupt vectors and the
6947 * port's queue resources.
6948 *
6949 * Return codes
6950 * 0 - successful
6951 * -ENOMEM - No available memory
6952 **/
6953 static int
6954 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6955 {
6956 int cfg_fcp_io_channel;
6957 uint32_t cpu;
6958 uint32_t i = 0;
6959 int fof_vectors = phba->cfg_fof ? 1 : 0;
6960
6961 /*
6962 * Sanity check for configured queue parameters against the run-time
6963 * device parameters
6964 */
6965
6966 /* Sanity check on HBA EQ parameters */
6967 cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
6968
6969 /* It doesn't make sense to have more io channels than online CPUs */
6970 for_each_present_cpu(cpu) {
6971 if (cpu_online(cpu))
6972 i++;
6973 }
6974 phba->sli4_hba.num_online_cpu = i;
6975 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
6976 phba->sli4_hba.curr_disp_cpu = 0;
6977
6978 if (i < cfg_fcp_io_channel) {
6979 lpfc_printf_log(phba,
6980 KERN_ERR, LOG_INIT,
6981 "3188 Reducing IO channels to match number of "
6982 "online CPUs: from %d to %d\n",
6983 cfg_fcp_io_channel, i);
6984 cfg_fcp_io_channel = i;
6985 }
6986
6987 if (cfg_fcp_io_channel + fof_vectors >
6988 phba->sli4_hba.max_cfg_param.max_eq) {
6989 if (phba->sli4_hba.max_cfg_param.max_eq <
6990 LPFC_FCP_IO_CHAN_MIN) {
6991 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6992 "2574 Not enough EQs (%d) from the "
6993 "pci function for supporting FCP "
6994 "EQs (%d)\n",
6995 phba->sli4_hba.max_cfg_param.max_eq,
6996 phba->cfg_fcp_io_channel);
6997 goto out_error;
6998 }
6999 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7000 "2575 Reducing IO channels to match number of "
7001 "available EQs: from %d to %d\n",
7002 cfg_fcp_io_channel,
7003 phba->sli4_hba.max_cfg_param.max_eq);
7004 cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq -
7005 fof_vectors;
7006 }
7007
7008 /* The actual number of FCP event queues adopted */
7009 phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
7010
7011 /* Get EQ depth from module parameter, fake the default for now */
7012 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
7013 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
7014
7015 /* Get CQ depth from module parameter, fake the default for now */
7016 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
7017 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
7018
7019 return 0;
7020 out_error:
7021 return -ENOMEM;
7022 }
7023
7024 /**
7025 * lpfc_sli4_queue_create - Create all the SLI4 queues
7026 * @phba: pointer to lpfc hba data structure.
7027 *
7028 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
7029 * operation. For each SLI4 queue type, the parameters such as queue entry
7030 * count (queue depth) shall be taken from the module parameter. For now,
7031 * we just use some constant number as placeholder.
7032 *
7033 * Return codes
7034 * 0 - successful
7035 * -ENOMEM - No available memory
7036 * -EIO - The mailbox failed to complete successfully.
7037 **/
7038 int
7039 lpfc_sli4_queue_create(struct lpfc_hba *phba)
7040 {
7041 struct lpfc_queue *qdesc;
7042 int idx;
7043
7044 /*
7045 * Create HBA Record arrays.
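*
* Each record array below holds one entry per configured FCP io channel;
* the EQ/CQ/WQ pointer arrays and the CQ map are allocated before the
* queues themselves.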
7046 */ 7047 if (!phba->cfg_fcp_io_channel) 7048 return -ERANGE; 7049 7050 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 7051 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 7052 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 7053 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 7054 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 7055 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 7056 7057 phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) * 7058 phba->cfg_fcp_io_channel), GFP_KERNEL); 7059 if (!phba->sli4_hba.hba_eq) { 7060 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7061 "2576 Failed allocate memory for " 7062 "fast-path EQ record array\n"); 7063 goto out_error; 7064 } 7065 7066 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * 7067 phba->cfg_fcp_io_channel), GFP_KERNEL); 7068 if (!phba->sli4_hba.fcp_cq) { 7069 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7070 "2577 Failed allocate memory for fast-path " 7071 "CQ record array\n"); 7072 goto out_error; 7073 } 7074 7075 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * 7076 phba->cfg_fcp_io_channel), GFP_KERNEL); 7077 if (!phba->sli4_hba.fcp_wq) { 7078 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7079 "2578 Failed allocate memory for fast-path " 7080 "WQ record array\n"); 7081 goto out_error; 7082 } 7083 7084 /* 7085 * Since the first EQ can have multiple CQs associated with it, 7086 * this array is used to quickly see if we have a FCP fast-path 7087 * CQ match. 7088 */ 7089 phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) * 7090 phba->cfg_fcp_io_channel), GFP_KERNEL); 7091 if (!phba->sli4_hba.fcp_cq_map) { 7092 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7093 "2545 Failed allocate memory for fast-path " 7094 "CQ map\n"); 7095 goto out_error; 7096 } 7097 7098 /* 7099 * Create HBA Event Queues (EQs). The cfg_fcp_io_channel specifies 7100 * how many EQs to create. 
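*
* Each io channel gets a matched set, allocated together in the loop
* below: one EQ, one fast-path FCP CQ, and one fast-path FCP WQ.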
7101 */ 7102 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { 7103 7104 /* Create EQs */ 7105 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 7106 phba->sli4_hba.eq_ecount); 7107 if (!qdesc) { 7108 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7109 "0497 Failed allocate EQ (%d)\n", idx); 7110 goto out_error; 7111 } 7112 phba->sli4_hba.hba_eq[idx] = qdesc; 7113 7114 /* Create Fast Path FCP CQs */ 7115 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 7116 phba->sli4_hba.cq_ecount); 7117 if (!qdesc) { 7118 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7119 "0499 Failed allocate fast-path FCP " 7120 "CQ (%d)\n", idx); 7121 goto out_error; 7122 } 7123 phba->sli4_hba.fcp_cq[idx] = qdesc; 7124 7125 /* Create Fast Path FCP WQs */ 7126 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 7127 phba->sli4_hba.wq_ecount); 7128 if (!qdesc) { 7129 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7130 "0503 Failed allocate fast-path FCP " 7131 "WQ (%d)\n", idx); 7132 goto out_error; 7133 } 7134 phba->sli4_hba.fcp_wq[idx] = qdesc; 7135 } 7136 7137 7138 /* 7139 * Create Slow Path Completion Queues (CQs) 7140 */ 7141 7142 /* Create slow-path Mailbox Command Complete Queue */ 7143 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 7144 phba->sli4_hba.cq_ecount); 7145 if (!qdesc) { 7146 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7147 "0500 Failed allocate slow-path mailbox CQ\n"); 7148 goto out_error; 7149 } 7150 phba->sli4_hba.mbx_cq = qdesc; 7151 7152 /* Create slow-path ELS Complete Queue */ 7153 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 7154 phba->sli4_hba.cq_ecount); 7155 if (!qdesc) { 7156 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7157 "0501 Failed allocate slow-path ELS CQ\n"); 7158 goto out_error; 7159 } 7160 phba->sli4_hba.els_cq = qdesc; 7161 7162 7163 /* 7164 * Create Slow Path Work Queues (WQs) 7165 */ 7166 7167 /* Create Mailbox Command Queue */ 7168 7169 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, 7170 phba->sli4_hba.mq_ecount); 7171 if (!qdesc) { 7172 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7173 "0505 Failed allocate slow-path MQ\n"); 7174 goto out_error; 7175 } 7176 phba->sli4_hba.mbx_wq = qdesc; 7177 7178 /* 7179 * Create ELS Work Queues 7180 */ 7181 7182 /* Create slow-path ELS Work Queue */ 7183 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 7184 phba->sli4_hba.wq_ecount); 7185 if (!qdesc) { 7186 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7187 "0504 Failed allocate slow-path ELS WQ\n"); 7188 goto out_error; 7189 } 7190 phba->sli4_hba.els_wq = qdesc; 7191 7192 /* 7193 * Create Receive Queue (RQ) 7194 */ 7195 7196 /* Create Receive Queue for header */ 7197 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 7198 phba->sli4_hba.rq_ecount); 7199 if (!qdesc) { 7200 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7201 "0506 Failed allocate receive HRQ\n"); 7202 goto out_error; 7203 } 7204 phba->sli4_hba.hdr_rq = qdesc; 7205 7206 /* Create Receive Queue for data */ 7207 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 7208 phba->sli4_hba.rq_ecount); 7209 if (!qdesc) { 7210 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7211 "0507 Failed allocate receive DRQ\n"); 7212 goto out_error; 7213 } 7214 phba->sli4_hba.dat_rq = qdesc; 7215 7216 /* Create the Queues needed for Flash Optimized Fabric operations */ 7217 if (phba->cfg_fof) 7218 lpfc_fof_queue_create(phba); 7219 return 0; 7220 7221 out_error: 7222 lpfc_sli4_queue_destroy(phba); 7223 return -ENOMEM; 7224 } 7225 7226 /** 7227 * lpfc_sli4_queue_destroy - 
Destroy all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues allocated for the
 * FCoE HBA operation.
 **/
void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
	int idx;

	if (phba->cfg_fof)
		lpfc_fof_queue_destroy(phba);

	if (phba->sli4_hba.hba_eq != NULL) {
		/* Release HBA event queue */
		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
			if (phba->sli4_hba.hba_eq[idx] != NULL) {
				lpfc_sli4_queue_free(
					phba->sli4_hba.hba_eq[idx]);
				phba->sli4_hba.hba_eq[idx] = NULL;
			}
		}
		kfree(phba->sli4_hba.hba_eq);
		phba->sli4_hba.hba_eq = NULL;
	}

	if (phba->sli4_hba.fcp_cq != NULL) {
		/* Release FCP completion queue */
		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
			if (phba->sli4_hba.fcp_cq[idx] != NULL) {
				lpfc_sli4_queue_free(
					phba->sli4_hba.fcp_cq[idx]);
				phba->sli4_hba.fcp_cq[idx] = NULL;
			}
		}
		kfree(phba->sli4_hba.fcp_cq);
		phba->sli4_hba.fcp_cq = NULL;
	}

	if (phba->sli4_hba.fcp_wq != NULL) {
		/* Release FCP work queue */
		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
			if (phba->sli4_hba.fcp_wq[idx] != NULL) {
				lpfc_sli4_queue_free(
					phba->sli4_hba.fcp_wq[idx]);
				phba->sli4_hba.fcp_wq[idx] = NULL;
			}
		}
		kfree(phba->sli4_hba.fcp_wq);
		phba->sli4_hba.fcp_wq = NULL;
	}

	/* Release FCP CQ mapping array */
	if (phba->sli4_hba.fcp_cq_map != NULL) {
		kfree(phba->sli4_hba.fcp_cq_map);
		phba->sli4_hba.fcp_cq_map = NULL;
	}

	/* Release mailbox command work queue */
	if (phba->sli4_hba.mbx_wq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
		phba->sli4_hba.mbx_wq = NULL;
	}

	/* Release ELS work queue */
	if (phba->sli4_hba.els_wq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
		phba->sli4_hba.els_wq = NULL;
	}

	/* Release unsolicited receive queue */
	if (phba->sli4_hba.hdr_rq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
		phba->sli4_hba.hdr_rq = NULL;
	}
	if (phba->sli4_hba.dat_rq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
		phba->sli4_hba.dat_rq = NULL;
	}

	/* Release ELS complete queue */
	if (phba->sli4_hba.els_cq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
		phba->sli4_hba.els_cq = NULL;
	}

	/* Release mailbox command complete queue */
	if (phba->sli4_hba.mbx_cq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
		phba->sli4_hba.mbx_cq = NULL;
	}

	return;
}

/**
 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
 * operation.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
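 *
 * Illustrative lifetime pairing of the queue routines in this file, as a
 * sketch only (error handling elided): lpfc_sli4_queue_create() allocates
 * the queue memory, this routine posts the queues to the port,
 * lpfc_sli4_queue_unset() tears them down on the port and
 * lpfc_sli4_queue_destroy() frees the memory:
 *
 *	if (!lpfc_sli4_queue_create(phba) &&
 *	    !lpfc_sli4_queue_setup(phba)) {
 *		...
 *		lpfc_sli4_queue_unset(phba);
 *	}
 *	lpfc_sli4_queue_destroy(phba);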
7339 **/ 7340 int 7341 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 7342 { 7343 struct lpfc_sli *psli = &phba->sli; 7344 struct lpfc_sli_ring *pring; 7345 int rc = -ENOMEM; 7346 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 7347 int fcp_cq_index = 0; 7348 uint32_t shdr_status, shdr_add_status; 7349 union lpfc_sli4_cfg_shdr *shdr; 7350 LPFC_MBOXQ_t *mboxq; 7351 uint32_t length; 7352 7353 /* Check for dual-ULP support */ 7354 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7355 if (!mboxq) { 7356 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7357 "3249 Unable to allocate memory for " 7358 "QUERY_FW_CFG mailbox command\n"); 7359 return -ENOMEM; 7360 } 7361 length = (sizeof(struct lpfc_mbx_query_fw_config) - 7362 sizeof(struct lpfc_sli4_cfg_mhdr)); 7363 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 7364 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 7365 length, LPFC_SLI4_MBX_EMBED); 7366 7367 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7368 7369 shdr = (union lpfc_sli4_cfg_shdr *) 7370 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 7371 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7372 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 7373 if (shdr_status || shdr_add_status || rc) { 7374 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7375 "3250 QUERY_FW_CFG mailbox failed with status " 7376 "x%x add_status x%x, mbx status x%x\n", 7377 shdr_status, shdr_add_status, rc); 7378 if (rc != MBX_TIMEOUT) 7379 mempool_free(mboxq, phba->mbox_mem_pool); 7380 rc = -ENXIO; 7381 goto out_error; 7382 } 7383 7384 phba->sli4_hba.fw_func_mode = 7385 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; 7386 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; 7387 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; 7388 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7389 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " 7390 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, 7391 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); 7392 7393 if (rc != MBX_TIMEOUT) 7394 mempool_free(mboxq, phba->mbox_mem_pool); 7395 7396 /* 7397 * Set up HBA Event Queues (EQs) 7398 */ 7399 7400 /* Set up HBA event queue */ 7401 if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) { 7402 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7403 "3147 Fast-path EQs not allocated\n"); 7404 rc = -ENOMEM; 7405 goto out_error; 7406 } 7407 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) { 7408 if (!phba->sli4_hba.hba_eq[fcp_eqidx]) { 7409 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7410 "0522 Fast-path EQ (%d) not " 7411 "allocated\n", fcp_eqidx); 7412 rc = -ENOMEM; 7413 goto out_destroy_hba_eq; 7414 } 7415 rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx], 7416 (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel)); 7417 if (rc) { 7418 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7419 "0523 Failed setup of fast-path EQ " 7420 "(%d), rc = 0x%x\n", fcp_eqidx, rc); 7421 goto out_destroy_hba_eq; 7422 } 7423 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7424 "2584 HBA EQ setup: " 7425 "queue[%d]-id=%d\n", fcp_eqidx, 7426 phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id); 7427 } 7428 7429 /* Set up fast-path FCP Response Complete Queue */ 7430 if (!phba->sli4_hba.fcp_cq) { 7431 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7432 "3148 Fast-path FCP CQ array not " 7433 "allocated\n"); 7434 rc = -ENOMEM; 7435 goto out_destroy_hba_eq; 7436 } 7437 7438 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) { 7439 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 7440 
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7441 "0526 Fast-path FCP CQ (%d) not " 7442 "allocated\n", fcp_cqidx); 7443 rc = -ENOMEM; 7444 goto out_destroy_fcp_cq; 7445 } 7446 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], 7447 phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP); 7448 if (rc) { 7449 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7450 "0527 Failed setup of fast-path FCP " 7451 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc); 7452 goto out_destroy_fcp_cq; 7453 } 7454 7455 /* Setup fcp_cq_map for fast lookup */ 7456 phba->sli4_hba.fcp_cq_map[fcp_cqidx] = 7457 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id; 7458 7459 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7460 "2588 FCP CQ setup: cq[%d]-id=%d, " 7461 "parent seq[%d]-id=%d\n", 7462 fcp_cqidx, 7463 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 7464 fcp_cqidx, 7465 phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id); 7466 } 7467 7468 /* Set up fast-path FCP Work Queue */ 7469 if (!phba->sli4_hba.fcp_wq) { 7470 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7471 "3149 Fast-path FCP WQ array not " 7472 "allocated\n"); 7473 rc = -ENOMEM; 7474 goto out_destroy_fcp_cq; 7475 } 7476 7477 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) { 7478 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { 7479 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7480 "0534 Fast-path FCP WQ (%d) not " 7481 "allocated\n", fcp_wqidx); 7482 rc = -ENOMEM; 7483 goto out_destroy_fcp_wq; 7484 } 7485 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], 7486 phba->sli4_hba.fcp_cq[fcp_wqidx], 7487 LPFC_FCP); 7488 if (rc) { 7489 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7490 "0535 Failed setup of fast-path FCP " 7491 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); 7492 goto out_destroy_fcp_wq; 7493 } 7494 7495 /* Bind this WQ to the next FCP ring */ 7496 pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx]; 7497 pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx]; 7498 phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring; 7499 7500 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7501 "2591 FCP WQ setup: wq[%d]-id=%d, " 7502 "parent cq[%d]-id=%d\n", 7503 fcp_wqidx, 7504 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, 7505 fcp_cq_index, 7506 phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id); 7507 } 7508 /* 7509 * Set up Complete Queues (CQs) 7510 */ 7511 7512 /* Set up slow-path MBOX Complete Queue as the first CQ */ 7513 if (!phba->sli4_hba.mbx_cq) { 7514 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7515 "0528 Mailbox CQ not allocated\n"); 7516 rc = -ENOMEM; 7517 goto out_destroy_fcp_wq; 7518 } 7519 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, 7520 phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX); 7521 if (rc) { 7522 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7523 "0529 Failed setup of slow-path mailbox CQ: " 7524 "rc = 0x%x\n", rc); 7525 goto out_destroy_fcp_wq; 7526 } 7527 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7528 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", 7529 phba->sli4_hba.mbx_cq->queue_id, 7530 phba->sli4_hba.hba_eq[0]->queue_id); 7531 7532 /* Set up slow-path ELS Complete Queue */ 7533 if (!phba->sli4_hba.els_cq) { 7534 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7535 "0530 ELS CQ not allocated\n"); 7536 rc = -ENOMEM; 7537 goto out_destroy_mbx_cq; 7538 } 7539 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, 7540 phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS); 7541 if (rc) { 7542 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7543 "0531 Failed setup of slow-path ELS CQ: " 7544 "rc = 0x%x\n", rc); 7545 goto out_destroy_mbx_cq; 7546 } 7547 lpfc_printf_log(phba, KERN_INFO, 
LOG_INIT, 7548 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", 7549 phba->sli4_hba.els_cq->queue_id, 7550 phba->sli4_hba.hba_eq[0]->queue_id); 7551 7552 /* 7553 * Set up all the Work Queues (WQs) 7554 */ 7555 7556 /* Set up Mailbox Command Queue */ 7557 if (!phba->sli4_hba.mbx_wq) { 7558 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7559 "0538 Slow-path MQ not allocated\n"); 7560 rc = -ENOMEM; 7561 goto out_destroy_els_cq; 7562 } 7563 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, 7564 phba->sli4_hba.mbx_cq, LPFC_MBOX); 7565 if (rc) { 7566 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7567 "0539 Failed setup of slow-path MQ: " 7568 "rc = 0x%x\n", rc); 7569 goto out_destroy_els_cq; 7570 } 7571 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7572 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 7573 phba->sli4_hba.mbx_wq->queue_id, 7574 phba->sli4_hba.mbx_cq->queue_id); 7575 7576 /* Set up slow-path ELS Work Queue */ 7577 if (!phba->sli4_hba.els_wq) { 7578 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7579 "0536 Slow-path ELS WQ not allocated\n"); 7580 rc = -ENOMEM; 7581 goto out_destroy_mbx_wq; 7582 } 7583 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, 7584 phba->sli4_hba.els_cq, LPFC_ELS); 7585 if (rc) { 7586 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7587 "0537 Failed setup of slow-path ELS WQ: " 7588 "rc = 0x%x\n", rc); 7589 goto out_destroy_mbx_wq; 7590 } 7591 7592 /* Bind this WQ to the ELS ring */ 7593 pring = &psli->ring[LPFC_ELS_RING]; 7594 pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq; 7595 phba->sli4_hba.els_cq->pring = pring; 7596 7597 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7598 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 7599 phba->sli4_hba.els_wq->queue_id, 7600 phba->sli4_hba.els_cq->queue_id); 7601 7602 /* 7603 * Create Receive Queue (RQ) 7604 */ 7605 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 7606 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7607 "0540 Receive Queue not allocated\n"); 7608 rc = -ENOMEM; 7609 goto out_destroy_els_wq; 7610 } 7611 7612 lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ); 7613 lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ); 7614 7615 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 7616 phba->sli4_hba.els_cq, LPFC_USOL); 7617 if (rc) { 7618 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7619 "0541 Failed setup of Receive Queue: " 7620 "rc = 0x%x\n", rc); 7621 goto out_destroy_fcp_wq; 7622 } 7623 7624 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7625 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 7626 "parent cq-id=%d\n", 7627 phba->sli4_hba.hdr_rq->queue_id, 7628 phba->sli4_hba.dat_rq->queue_id, 7629 phba->sli4_hba.els_cq->queue_id); 7630 7631 if (phba->cfg_fof) { 7632 rc = lpfc_fof_queue_setup(phba); 7633 if (rc) { 7634 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7635 "0549 Failed setup of FOF Queues: " 7636 "rc = 0x%x\n", rc); 7637 goto out_destroy_els_rq; 7638 } 7639 } 7640 return 0; 7641 7642 out_destroy_els_rq: 7643 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); 7644 out_destroy_els_wq: 7645 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 7646 out_destroy_mbx_wq: 7647 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 7648 out_destroy_els_cq: 7649 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 7650 out_destroy_mbx_cq: 7651 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 7652 out_destroy_fcp_wq: 7653 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) 7654 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); 7655 out_destroy_fcp_cq: 7656 for (--fcp_cqidx; fcp_cqidx >= 
0; fcp_cqidx--)
		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
out_destroy_hba_eq:
	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
		lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]);
out_error:
	return rc;
}

/**
 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset all the SLI4 queues set up for the FCoE
 * HBA operation.
 **/
void
lpfc_sli4_queue_unset(struct lpfc_hba *phba)
{
	int fcp_qidx;

	/* Unset the queues created for Flash Optimized Fabric operations */
	if (phba->cfg_fof)
		lpfc_fof_queue_destroy(phba);
	/* Unset mailbox command work queue */
	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
	/* Unset ELS work queue */
	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
	/* Unset unsolicited receive queue */
	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
	/* Unset FCP work queue */
	if (phba->sli4_hba.fcp_wq) {
		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
		     fcp_qidx++)
			lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
	}
	/* Unset mailbox command complete queue */
	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
	/* Unset ELS complete queue */
	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
	/* Unset FCP response complete queue */
	if (phba->sli4_hba.fcp_cq) {
		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
		     fcp_qidx++)
			lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
	}
	/* Unset fast-path event queue */
	if (phba->sli4_hba.hba_eq) {
		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
		     fcp_qidx++)
			lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]);
	}
}

/**
 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and set up a pool of completion queue
 * events. The body of the completion queue event is a completion queue entry
 * (CQE). For now, this pool is used for the interrupt service routine to
 * queue the following HBA completion queue events for the worker thread to
 * process:
 *   - Mailbox asynchronous events
 *   - Receive queue completion unsolicited events
 * Later, this can be used for all the slow-path events.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/
static int
lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	int i;

	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
		if (!cq_event)
			goto out_pool_create_fail;
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_cqe_event_pool);
	}
	return 0;

out_pool_create_fail:
	lpfc_sli4_cq_event_pool_destroy(phba);
	return -ENOMEM;
}

/**
 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the pool of completion queue events at
 * driver unload time. Note that it is the responsibility of the driver
 * cleanup routine to free all the outstanding completion-queue events
 * allocated from this pool back into the pool before invoking this routine
 * to destroy the pool.
 **/
static void
lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event, *next_cq_event;

	list_for_each_entry_safe(cq_event, next_cq_event,
				 &phba->sli4_hba.sp_cqe_event_pool, list) {
		list_del(&cq_event->list);
		kfree(cq_event);
	}
}

/**
 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock free version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event = NULL;

	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
			 struct lpfc_cq_event, list);
	return cq_event;
}

/**
 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the locked version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	cq_event = __lpfc_sli4_cq_event_alloc(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return cq_event;
}

/**
 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock free version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			     struct lpfc_cq_event *cq_event)
{
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
}

/**
 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the locked version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			   struct lpfc_cq_event *cq_event)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli4_cq_event_release(phba, cq_event);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free all the pending completion-queue events
 * back into the free pool for device reset.
 **/
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
	LIST_HEAD(cqelist);
	struct lpfc_cq_event *cqe;
	unsigned long iflags;

	/* Retrieve all the pending WCQEs from pending WCQE lists */
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Pending FCP XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
			 &cqelist);
	/* Pending ELS XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
			 &cqelist);
	/* Pending async events */
	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
			 &cqelist);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	while (!list_empty(&cqelist)) {
		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
		lpfc_sli4_cq_event_release(phba, cqe);
	}
}

/**
 * lpfc_pci_function_reset - Reset pci function.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request a PCI function reset. It destroys
 * all resources assigned to the PCI function which originates this request.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_pci_function_reset(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t rc = 0, if_type;
	uint32_t shdr_status, shdr_add_status;
	uint32_t rdy_chk, num_resets = 0, reset_again = 0;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_register reg_data;
	uint16_t devid;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0494 Unable to allocate memory for "
					"issuing SLI_FUNCTION_RESET mailbox "
					"command\n");
			return -ENOMEM;
		}

		/* Setup PCI function reset mailbox-ioctl command */
		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
				 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
				 LPFC_SLI4_MBX_EMBED);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *)
			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
					 &shdr->response);
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
		if (shdr_status || shdr_add_status || rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0495 SLI_FUNCTION_RESET mailbox "
					"failed with status x%x add_status x%x,"
					" mbx status x%x\n",
					shdr_status, shdr_add_status, rc);
			rc = -ENXIO;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		for (num_resets = 0;
		     num_resets < MAX_IF_TYPE_2_RESETS;
		     num_resets++) {
			reg_data.word0 = 0;
			bf_set(lpfc_sliport_ctrl_end, &reg_data,
			       LPFC_SLIPORT_LITTLE_ENDIAN);
			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
			       LPFC_SLIPORT_INIT_PORT);
			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
			       CTRLregaddr);
			/* flush */
			pci_read_config_word(phba->pcidev,
					     PCI_DEVICE_ID, &devid);
			/*
			 * Poll the Port Status Register and wait for RDY for
			 * up to 10 seconds. If the port doesn't respond, treat
			 * it as an error. If the port responds with RN, start
			 * the loop again.
			 */
			for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
				msleep(10);
				if (lpfc_readl(phba->sli4_hba.u.if_type2.
					       STATUSregaddr,
					       &reg_data.word0)) {
					rc = -ENODEV;
					goto out;
				}
				if (bf_get(lpfc_sliport_status_rn, &reg_data))
					reset_again++;
				if (bf_get(lpfc_sliport_status_rdy, &reg_data))
					break;
			}

			/*
			 * If the port responds to the init request with
			 * reset needed, delay for a bit and restart the loop.
			 */
			if (reset_again && (rdy_chk < 1000)) {
				msleep(10);
				reset_again = 0;
				continue;
			}

			/* Detect any port errors. */
			if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
			    (rdy_chk >= 1000)) {
				phba->work_status[0] = readl(
					phba->sli4_hba.u.if_type2.ERR1regaddr);
				phba->work_status[1] = readl(
					phba->sli4_hba.u.if_type2.ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2890 Port error detected during port "
					"reset(%d): wait_tmo:%d ms, "
					"port status reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					num_resets, rdy_chk*10,
					reg_data.word0,
					phba->work_status[0],
					phba->work_status[1]);
				rc = -ENODEV;
			}

			/*
			 * Terminate the outer loop provided the Port indicated
			 * ready within 10 seconds.
			 */
			if (rdy_chk < 1000)
				break;
		}
		/* delay driver action following IF_TYPE_2 function reset */
		msleep(100);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}

out:
	/* Catch the not-ready port failure after a port reset. */
	if (num_resets >= MAX_IF_TYPE_2_RESETS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3317 HBA not functional: IP Reset Failed "
				"after (%d) retries, try: "
				"echo fw_reset > board_mode\n", num_resets);
		rc = -ENODEV;
	}

	return rc;
}

/**
 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-4 interface spec.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	unsigned long bar0map_len, bar1map_len, bar2map_len;
	int error = -ENODEV;
	uint32_t if_type;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return error;
	else
		pdev = phba->pcidev;

	/* Set the device DMA mask size */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
		    || pci_set_consistent_dma_mask(pdev,
						   DMA_BIT_MASK(32)) != 0) {
			return error;
		}
	}

	/*
	 * The BARs and register set definitions and offset locations are
	 * dependent on the if_type.
	 */
	if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
				  &phba->sli4_hba.sli_intf.word0)) {
		return error;
	}

	/* There is no SLI3 failback for SLI4 devices.
*/ 8072 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 8073 LPFC_SLI_INTF_VALID) { 8074 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8075 "2894 SLI_INTF reg contents invalid " 8076 "sli_intf reg 0x%x\n", 8077 phba->sli4_hba.sli_intf.word0); 8078 return error; 8079 } 8080 8081 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8082 /* 8083 * Get the bus address of SLI4 device Bar regions and the 8084 * number of bytes required by each mapping. The mapping of the 8085 * particular PCI BARs regions is dependent on the type of 8086 * SLI4 device. 8087 */ 8088 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { 8089 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 8090 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 8091 8092 /* 8093 * Map SLI4 PCI Config Space Register base to a kernel virtual 8094 * addr 8095 */ 8096 phba->sli4_hba.conf_regs_memmap_p = 8097 ioremap(phba->pci_bar0_map, bar0map_len); 8098 if (!phba->sli4_hba.conf_regs_memmap_p) { 8099 dev_printk(KERN_ERR, &pdev->dev, 8100 "ioremap failed for SLI4 PCI config " 8101 "registers.\n"); 8102 goto out; 8103 } 8104 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 8105 /* Set up BAR0 PCI config space register memory map */ 8106 lpfc_sli4_bar0_register_memmap(phba, if_type); 8107 } else { 8108 phba->pci_bar0_map = pci_resource_start(pdev, 1); 8109 bar0map_len = pci_resource_len(pdev, 1); 8110 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 8111 dev_printk(KERN_ERR, &pdev->dev, 8112 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 8113 goto out; 8114 } 8115 phba->sli4_hba.conf_regs_memmap_p = 8116 ioremap(phba->pci_bar0_map, bar0map_len); 8117 if (!phba->sli4_hba.conf_regs_memmap_p) { 8118 dev_printk(KERN_ERR, &pdev->dev, 8119 "ioremap failed for SLI4 PCI config " 8120 "registers.\n"); 8121 goto out; 8122 } 8123 lpfc_sli4_bar0_register_memmap(phba, if_type); 8124 } 8125 8126 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && 8127 (pci_resource_start(pdev, PCI_64BIT_BAR2))) { 8128 /* 8129 * Map SLI4 if type 0 HBA Control Register base to a kernel 8130 * virtual address and setup the registers. 8131 */ 8132 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 8133 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 8134 phba->sli4_hba.ctrl_regs_memmap_p = 8135 ioremap(phba->pci_bar1_map, bar1map_len); 8136 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 8137 dev_printk(KERN_ERR, &pdev->dev, 8138 "ioremap failed for SLI4 HBA control registers.\n"); 8139 goto out_iounmap_conf; 8140 } 8141 phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p; 8142 lpfc_sli4_bar1_register_memmap(phba); 8143 } 8144 8145 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && 8146 (pci_resource_start(pdev, PCI_64BIT_BAR4))) { 8147 /* 8148 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel 8149 * virtual address and setup the registers. 
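		 * (BAR summary as set up by this routine: on if_type 0 parts
		 * BAR0 maps the SLI4 config registers, BAR2 the control
		 * registers and BAR4 the doorbell registers, while if_type 2
		 * parts expose everything through BAR0 alone.)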
		 */
		phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
		bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
		phba->sli4_hba.drbl_regs_memmap_p =
			ioremap(phba->pci_bar2_map, bar2map_len);
		if (!phba->sli4_hba.drbl_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLI4 HBA doorbell registers.\n");
			goto out_iounmap_ctrl;
		}
		phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
		error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
		if (error)
			goto out_iounmap_all;
	}

	return 0;

out_iounmap_all:
	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
out_iounmap_ctrl:
	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
out_iounmap_conf:
	iounmap(phba->sli4_hba.conf_regs_memmap_p);
out:
	return error;
}

/**
 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-4 interface spec.
 **/
static void
lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
{
	uint32_t if_type;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);

	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}

/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
 * invoked, enables either all or nothing, depending on the current
 * availability of PCI vector resources. The device driver is responsible
 * for calling the individual request_irq() to register each MSI-X vector
 * with an interrupt handler, which is done in this function. Note that
 * later, when the device is unloading, the driver should always call
 * free_irq() on all MSI-X vectors it has done request_irq() on before
 * calling pci_disable_msix(). Failure to do so results in a BUG_ON() and
 * the device will be left with MSI-X enabled and leak its vectors.
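 *
 * Illustrative teardown sketch, mirroring lpfc_sli_disable_msix() below:
 *
 *	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
 *		free_irq(phba->msix_entries[i].vector, phba);
 *	pci_disable_msix(phba->pcidev);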
8224 * 8225 * Return codes 8226 * 0 - successful 8227 * other values - error 8228 **/ 8229 static int 8230 lpfc_sli_enable_msix(struct lpfc_hba *phba) 8231 { 8232 int rc, i; 8233 LPFC_MBOXQ_t *pmb; 8234 8235 /* Set up MSI-X multi-message vectors */ 8236 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 8237 phba->msix_entries[i].entry = i; 8238 8239 /* Configure MSI-X capability structure */ 8240 rc = pci_enable_msix(phba->pcidev, phba->msix_entries, 8241 ARRAY_SIZE(phba->msix_entries)); 8242 if (rc) { 8243 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8244 "0420 PCI enable MSI-X failed (%d)\n", rc); 8245 goto msi_fail_out; 8246 } 8247 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 8248 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8249 "0477 MSI-X entry[%d]: vector=x%x " 8250 "message=%d\n", i, 8251 phba->msix_entries[i].vector, 8252 phba->msix_entries[i].entry); 8253 /* 8254 * Assign MSI-X vectors to interrupt handlers 8255 */ 8256 8257 /* vector-0 is associated to slow-path handler */ 8258 rc = request_irq(phba->msix_entries[0].vector, 8259 &lpfc_sli_sp_intr_handler, IRQF_SHARED, 8260 LPFC_SP_DRIVER_HANDLER_NAME, phba); 8261 if (rc) { 8262 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8263 "0421 MSI-X slow-path request_irq failed " 8264 "(%d)\n", rc); 8265 goto msi_fail_out; 8266 } 8267 8268 /* vector-1 is associated to fast-path handler */ 8269 rc = request_irq(phba->msix_entries[1].vector, 8270 &lpfc_sli_fp_intr_handler, IRQF_SHARED, 8271 LPFC_FP_DRIVER_HANDLER_NAME, phba); 8272 8273 if (rc) { 8274 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8275 "0429 MSI-X fast-path request_irq failed " 8276 "(%d)\n", rc); 8277 goto irq_fail_out; 8278 } 8279 8280 /* 8281 * Configure HBA MSI-X attention conditions to messages 8282 */ 8283 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8284 8285 if (!pmb) { 8286 rc = -ENOMEM; 8287 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8288 "0474 Unable to allocate memory for issuing " 8289 "MBOX_CONFIG_MSI command\n"); 8290 goto mem_fail_out; 8291 } 8292 rc = lpfc_config_msi(phba, pmb); 8293 if (rc) 8294 goto mbx_fail_out; 8295 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 8296 if (rc != MBX_SUCCESS) { 8297 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 8298 "0351 Config MSI mailbox command failed, " 8299 "mbxCmd x%x, mbxStatus x%x\n", 8300 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 8301 goto mbx_fail_out; 8302 } 8303 8304 /* Free memory allocated for mailbox command */ 8305 mempool_free(pmb, phba->mbox_mem_pool); 8306 return rc; 8307 8308 mbx_fail_out: 8309 /* Free memory allocated for mailbox command */ 8310 mempool_free(pmb, phba->mbox_mem_pool); 8311 8312 mem_fail_out: 8313 /* free the irq already requested */ 8314 free_irq(phba->msix_entries[1].vector, phba); 8315 8316 irq_fail_out: 8317 /* free the irq already requested */ 8318 free_irq(phba->msix_entries[0].vector, phba); 8319 8320 msi_fail_out: 8321 /* Unconfigure MSI-X capability structure */ 8322 pci_disable_msix(phba->pcidev); 8323 return rc; 8324 } 8325 8326 /** 8327 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device. 8328 * @phba: pointer to lpfc hba data structure. 8329 * 8330 * This routine is invoked to release the MSI-X vectors and then disable the 8331 * MSI-X interrupt mode to device with SLI-3 interface spec. 
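 *
 * (Expected to be called only while phba->intr_type == MSIX, e.g. from
 * lpfc_sli_disable_intr() below.)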
 **/
static void
lpfc_sli_disable_msix(struct lpfc_hba *phba)
{
	int i;

	/* Free up MSI-X multi-message vectors */
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		free_irq(phba->msix_entries[i].vector, phba);
	/* Disable MSI-X */
	pci_disable_msix(phba->pcidev);

	return;
}

/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
 * enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 */
static int
lpfc_sli_enable_msi(struct lpfc_hba *phba)
{
	int rc;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0462 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0471 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0478 MSI request_irq failed (%d)\n", rc);
	}
	return rc;
}

/**
 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the MSI interrupt mode to device with
 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it
 * has done request_irq() on before calling pci_disable_msi(). Failure to
 * do so results in a BUG_ON() and the device will be left with MSI enabled
 * and leak its vector.
 */
static void
lpfc_sli_disable_msi(struct lpfc_hba *phba)
{
	free_irq(phba->pcidev->irq, phba);
	pci_disable_msi(phba->pcidev);
	return;
}

/**
 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
 * spec. Depending on the interrupt mode configured for the driver, the
 * driver will try to fall back from the configured interrupt mode to an
 * interrupt mode which is supported by the platform, kernel, and device,
 * in the order of:
 * MSI-X -> MSI -> IRQ.
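 *
 * A minimal usage sketch; the cfg_mode argument is assumed here to come
 * from the driver's lpfc_use_msi configuration parameter:
 *
 *	intr_mode = lpfc_sli_enable_intr(phba, phba->cfg_use_msi);
 *	if (intr_mode == LPFC_INTR_ERROR)
 *		error = -ENODEV;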
8415 * 8416 * Return codes 8417 * 0 - successful 8418 * other values - error 8419 **/ 8420 static uint32_t 8421 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 8422 { 8423 uint32_t intr_mode = LPFC_INTR_ERROR; 8424 int retval; 8425 8426 if (cfg_mode == 2) { 8427 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 8428 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 8429 if (!retval) { 8430 /* Now, try to enable MSI-X interrupt mode */ 8431 retval = lpfc_sli_enable_msix(phba); 8432 if (!retval) { 8433 /* Indicate initialization to MSI-X mode */ 8434 phba->intr_type = MSIX; 8435 intr_mode = 2; 8436 } 8437 } 8438 } 8439 8440 /* Fallback to MSI if MSI-X initialization failed */ 8441 if (cfg_mode >= 1 && phba->intr_type == NONE) { 8442 retval = lpfc_sli_enable_msi(phba); 8443 if (!retval) { 8444 /* Indicate initialization to MSI mode */ 8445 phba->intr_type = MSI; 8446 intr_mode = 1; 8447 } 8448 } 8449 8450 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 8451 if (phba->intr_type == NONE) { 8452 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 8453 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 8454 if (!retval) { 8455 /* Indicate initialization to INTx mode */ 8456 phba->intr_type = INTx; 8457 intr_mode = 0; 8458 } 8459 } 8460 return intr_mode; 8461 } 8462 8463 /** 8464 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 8465 * @phba: pointer to lpfc hba data structure. 8466 * 8467 * This routine is invoked to disable device interrupt and disassociate the 8468 * driver's interrupt handler(s) from interrupt vector(s) to device with 8469 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 8470 * release the interrupt vector(s) for the message signaled interrupt. 8471 **/ 8472 static void 8473 lpfc_sli_disable_intr(struct lpfc_hba *phba) 8474 { 8475 /* Disable the currently initialized interrupt mode */ 8476 if (phba->intr_type == MSIX) 8477 lpfc_sli_disable_msix(phba); 8478 else if (phba->intr_type == MSI) 8479 lpfc_sli_disable_msi(phba); 8480 else if (phba->intr_type == INTx) 8481 free_irq(phba->pcidev->irq, phba); 8482 8483 /* Reset interrupt management states */ 8484 phba->intr_type = NONE; 8485 phba->sli.slistat.sli_intr = 0; 8486 8487 return; 8488 } 8489 8490 /** 8491 * lpfc_find_next_cpu - Find next available CPU that matches the phys_id 8492 * @phba: pointer to lpfc hba data structure. 8493 * 8494 * Find next available CPU to use for IRQ to CPU affinity. 8495 */ 8496 static int 8497 lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id) 8498 { 8499 struct lpfc_vector_map_info *cpup; 8500 int cpu; 8501 8502 cpup = phba->sli4_hba.cpu_map; 8503 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { 8504 /* CPU must be online */ 8505 if (cpu_online(cpu)) { 8506 if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && 8507 (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) && 8508 (cpup->phys_id == phys_id)) { 8509 return cpu; 8510 } 8511 } 8512 cpup++; 8513 } 8514 8515 /* 8516 * If we get here, we have used ALL CPUs for the specific 8517 * phys_id. Now we need to clear out lpfc_used_cpu and start 8518 * reusing CPUs. 
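	 *
	 * (Note: the second scan below intentionally drops the lpfc_used_cpu
	 * check, so a CPU on this phys_id that was handed out before may be
	 * selected again.)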
8519 */ 8520 8521 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { 8522 if (lpfc_used_cpu[cpu] == phys_id) 8523 lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY; 8524 } 8525 8526 cpup = phba->sli4_hba.cpu_map; 8527 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { 8528 /* CPU must be online */ 8529 if (cpu_online(cpu)) { 8530 if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && 8531 (cpup->phys_id == phys_id)) { 8532 return cpu; 8533 } 8534 } 8535 cpup++; 8536 } 8537 return LPFC_VECTOR_MAP_EMPTY; 8538 } 8539 8540 /** 8541 * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors 8542 * @phba: pointer to lpfc hba data structure. 8543 * @vectors: number of HBA vectors 8544 * 8545 * Affinitize MSIX IRQ vectors to CPUs. Try to equally spread vector 8546 * affinization across multple physical CPUs (numa nodes). 8547 * In addition, this routine will assign an IO channel for each CPU 8548 * to use when issuing I/Os. 8549 */ 8550 static int 8551 lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) 8552 { 8553 int i, idx, saved_chann, used_chann, cpu, phys_id; 8554 int max_phys_id, min_phys_id; 8555 int num_io_channel, first_cpu, chan; 8556 struct lpfc_vector_map_info *cpup; 8557 #ifdef CONFIG_X86 8558 struct cpuinfo_x86 *cpuinfo; 8559 #endif 8560 struct cpumask *mask; 8561 uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1]; 8562 8563 /* If there is no mapping, just return */ 8564 if (!phba->cfg_fcp_cpu_map) 8565 return 1; 8566 8567 /* Init cpu_map array */ 8568 memset(phba->sli4_hba.cpu_map, 0xff, 8569 (sizeof(struct lpfc_vector_map_info) * 8570 phba->sli4_hba.num_present_cpu)); 8571 8572 max_phys_id = 0; 8573 min_phys_id = 0xff; 8574 phys_id = 0; 8575 num_io_channel = 0; 8576 first_cpu = LPFC_VECTOR_MAP_EMPTY; 8577 8578 /* Update CPU map with physical id and core id of each CPU */ 8579 cpup = phba->sli4_hba.cpu_map; 8580 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { 8581 #ifdef CONFIG_X86 8582 cpuinfo = &cpu_data(cpu); 8583 cpup->phys_id = cpuinfo->phys_proc_id; 8584 cpup->core_id = cpuinfo->cpu_core_id; 8585 #else 8586 /* No distinction between CPUs for other platforms */ 8587 cpup->phys_id = 0; 8588 cpup->core_id = 0; 8589 #endif 8590 8591 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8592 "3328 CPU physid %d coreid %d\n", 8593 cpup->phys_id, cpup->core_id); 8594 8595 if (cpup->phys_id > max_phys_id) 8596 max_phys_id = cpup->phys_id; 8597 if (cpup->phys_id < min_phys_id) 8598 min_phys_id = cpup->phys_id; 8599 cpup++; 8600 } 8601 8602 phys_id = min_phys_id; 8603 /* Now associate the HBA vectors with specific CPUs */ 8604 for (idx = 0; idx < vectors; idx++) { 8605 cpup = phba->sli4_hba.cpu_map; 8606 cpu = lpfc_find_next_cpu(phba, phys_id); 8607 if (cpu == LPFC_VECTOR_MAP_EMPTY) { 8608 8609 /* Try for all phys_id's */ 8610 for (i = 1; i < max_phys_id; i++) { 8611 phys_id++; 8612 if (phys_id > max_phys_id) 8613 phys_id = min_phys_id; 8614 cpu = lpfc_find_next_cpu(phba, phys_id); 8615 if (cpu == LPFC_VECTOR_MAP_EMPTY) 8616 continue; 8617 goto found; 8618 } 8619 8620 /* Use round robin for scheduling */ 8621 phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN; 8622 chan = 0; 8623 cpup = phba->sli4_hba.cpu_map; 8624 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 8625 cpup->channel_id = chan; 8626 cpup++; 8627 chan++; 8628 if (chan >= phba->cfg_fcp_io_channel) 8629 chan = 0; 8630 } 8631 8632 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8633 "3329 Cannot set affinity:" 8634 "Error mapping vector %d (%d)\n", 8635 idx, vectors); 8636 return 0; 8637 } 8638 found: 8639 cpup += cpu; 8640 
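		/*
		 * cpup was reset to the base of the cpu_map array above, so
		 * advancing it by the index returned by lpfc_find_next_cpu()
		 * leaves it pointing at the selected CPU's map entry.
		 */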
if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP) 8641 lpfc_used_cpu[cpu] = phys_id; 8642 8643 /* Associate vector with selected CPU */ 8644 cpup->irq = phba->sli4_hba.msix_entries[idx].vector; 8645 8646 /* Associate IO channel with selected CPU */ 8647 cpup->channel_id = idx; 8648 num_io_channel++; 8649 8650 if (first_cpu == LPFC_VECTOR_MAP_EMPTY) 8651 first_cpu = cpu; 8652 8653 /* Now affinitize to the selected CPU */ 8654 mask = &cpup->maskbits; 8655 cpumask_clear(mask); 8656 cpumask_set_cpu(cpu, mask); 8657 i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx]. 8658 vector, mask); 8659 8660 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8661 "3330 Set Affinity: CPU %d channel %d " 8662 "irq %d (%x)\n", 8663 cpu, cpup->channel_id, 8664 phba->sli4_hba.msix_entries[idx].vector, i); 8665 8666 /* Spread vector mapping across multple physical CPU nodes */ 8667 phys_id++; 8668 if (phys_id > max_phys_id) 8669 phys_id = min_phys_id; 8670 } 8671 8672 /* 8673 * Finally fill in the IO channel for any remaining CPUs. 8674 * At this point, all IO channels have been assigned to a specific 8675 * MSIx vector, mapped to a specific CPU. 8676 * Base the remaining IO channel assigned, to IO channels already 8677 * assigned to other CPUs on the same phys_id. 8678 */ 8679 for (i = min_phys_id; i <= max_phys_id; i++) { 8680 /* 8681 * If there are no io channels already mapped to 8682 * this phys_id, just round robin thru the io_channels. 8683 * Setup chann[] for round robin. 8684 */ 8685 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) 8686 chann[idx] = idx; 8687 8688 saved_chann = 0; 8689 used_chann = 0; 8690 8691 /* 8692 * First build a list of IO channels already assigned 8693 * to this phys_id before reassigning the same IO 8694 * channels to the remaining CPUs. 8695 */ 8696 cpup = phba->sli4_hba.cpu_map; 8697 cpu = first_cpu; 8698 cpup += cpu; 8699 for (idx = 0; idx < phba->sli4_hba.num_present_cpu; 8700 idx++) { 8701 if (cpup->phys_id == i) { 8702 /* 8703 * Save any IO channels that are 8704 * already mapped to this phys_id. 
8705 */ 8706 if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) { 8707 chann[saved_chann] = 8708 cpup->channel_id; 8709 saved_chann++; 8710 goto out; 8711 } 8712 8713 /* See if we are using round-robin */ 8714 if (saved_chann == 0) 8715 saved_chann = 8716 phba->cfg_fcp_io_channel; 8717 8718 /* Associate next IO channel with CPU */ 8719 cpup->channel_id = chann[used_chann]; 8720 num_io_channel++; 8721 used_chann++; 8722 if (used_chann == saved_chann) 8723 used_chann = 0; 8724 8725 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8726 "3331 Set IO_CHANN " 8727 "CPU %d channel %d\n", 8728 idx, cpup->channel_id); 8729 } 8730 out: 8731 cpu++; 8732 if (cpu >= phba->sli4_hba.num_present_cpu) { 8733 cpup = phba->sli4_hba.cpu_map; 8734 cpu = 0; 8735 } else { 8736 cpup++; 8737 } 8738 } 8739 } 8740 8741 if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) { 8742 cpup = phba->sli4_hba.cpu_map; 8743 for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) { 8744 if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) { 8745 cpup->channel_id = 0; 8746 num_io_channel++; 8747 8748 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8749 "3332 Assign IO_CHANN " 8750 "CPU %d channel %d\n", 8751 idx, cpup->channel_id); 8752 } 8753 cpup++; 8754 } 8755 } 8756 8757 /* Sanity check */ 8758 if (num_io_channel != phba->sli4_hba.num_present_cpu) 8759 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8760 "3333 Set affinity mismatch:" 8761 "%d chann != %d cpus: %d vectors\n", 8762 num_io_channel, phba->sli4_hba.num_present_cpu, 8763 vectors); 8764 8765 /* Enable using cpu affinity for scheduling */ 8766 phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU; 8767 return 1; 8768 } 8769 8770 8771 /** 8772 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 8773 * @phba: pointer to lpfc hba data structure. 8774 * 8775 * This routine is invoked to enable the MSI-X interrupt vectors to device 8776 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called 8777 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked, 8778 * enables either all or nothing, depending on the current availability of 8779 * PCI vector resources. The device driver is responsible for calling the 8780 * individual request_irq() to register each MSI-X vector with a interrupt 8781 * handler, which is done in this function. Note that later when device is 8782 * unloading, the driver should always call free_irq() on all MSI-X vectors 8783 * it has done request_irq() on before calling pci_disable_msix(). Failure 8784 * to do so results in a BUG_ON() and a device will be left with MSI-X 8785 * enabled and leaks its vectors. 
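 *
 * (Illustrative note: a positive return from pci_enable_msix() reports how
 * many vectors are actually available, and the retry loop below simply asks
 * again with that count, e.g. a request for 8 vectors that returns 4 is
 * retried as a request for 4.)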
8786 * 8787 * Return codes 8788 * 0 - successful 8789 * other values - error 8790 **/ 8791 static int 8792 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 8793 { 8794 int vectors, rc, index; 8795 8796 /* Set up MSI-X multi-message vectors */ 8797 for (index = 0; index < phba->cfg_fcp_io_channel; index++) 8798 phba->sli4_hba.msix_entries[index].entry = index; 8799 8800 /* Configure MSI-X capability structure */ 8801 vectors = phba->cfg_fcp_io_channel; 8802 if (phba->cfg_fof) { 8803 phba->sli4_hba.msix_entries[index].entry = index; 8804 vectors++; 8805 } 8806 enable_msix_vectors: 8807 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, 8808 vectors); 8809 if (rc > 1) { 8810 vectors = rc; 8811 goto enable_msix_vectors; 8812 } else if (rc) { 8813 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8814 "0484 PCI enable MSI-X failed (%d)\n", rc); 8815 goto msi_fail_out; 8816 } 8817 8818 /* Log MSI-X vector assignment */ 8819 for (index = 0; index < vectors; index++) 8820 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8821 "0489 MSI-X entry[%d]: vector=x%x " 8822 "message=%d\n", index, 8823 phba->sli4_hba.msix_entries[index].vector, 8824 phba->sli4_hba.msix_entries[index].entry); 8825 8826 /* Assign MSI-X vectors to interrupt handlers */ 8827 for (index = 0; index < vectors; index++) { 8828 memset(&phba->sli4_hba.handler_name[index], 0, 16); 8829 sprintf((char *)&phba->sli4_hba.handler_name[index], 8830 LPFC_DRIVER_HANDLER_NAME"%d", index); 8831 8832 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 8833 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 8834 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1); 8835 if (phba->cfg_fof && (index == (vectors - 1))) 8836 rc = request_irq( 8837 phba->sli4_hba.msix_entries[index].vector, 8838 &lpfc_sli4_fof_intr_handler, IRQF_SHARED, 8839 (char *)&phba->sli4_hba.handler_name[index], 8840 &phba->sli4_hba.fcp_eq_hdl[index]); 8841 else 8842 rc = request_irq( 8843 phba->sli4_hba.msix_entries[index].vector, 8844 &lpfc_sli4_hba_intr_handler, IRQF_SHARED, 8845 (char *)&phba->sli4_hba.handler_name[index], 8846 &phba->sli4_hba.fcp_eq_hdl[index]); 8847 if (rc) { 8848 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8849 "0486 MSI-X fast-path (%d) " 8850 "request_irq failed (%d)\n", index, rc); 8851 goto cfg_fail_out; 8852 } 8853 } 8854 8855 if (phba->cfg_fof) 8856 vectors--; 8857 8858 if (vectors != phba->cfg_fcp_io_channel) { 8859 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8860 "3238 Reducing IO channels to match number of " 8861 "MSI-X vectors, requested %d got %d\n", 8862 phba->cfg_fcp_io_channel, vectors); 8863 phba->cfg_fcp_io_channel = vectors; 8864 } 8865 8866 lpfc_sli4_set_affinity(phba, vectors); 8867 return rc; 8868 8869 cfg_fail_out: 8870 /* free the irq already requested */ 8871 for (--index; index >= 0; index--) { 8872 irq_set_affinity_hint(phba->sli4_hba.msix_entries[index]. 8873 vector, NULL); 8874 free_irq(phba->sli4_hba.msix_entries[index].vector, 8875 &phba->sli4_hba.fcp_eq_hdl[index]); 8876 } 8877 8878 msi_fail_out: 8879 /* Unconfigure MSI-X capability structure */ 8880 pci_disable_msix(phba->pcidev); 8881 return rc; 8882 } 8883 8884 /** 8885 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device 8886 * @phba: pointer to lpfc hba data structure. 8887 * 8888 * This routine is invoked to release the MSI-X vectors and then disable the 8889 * MSI-X interrupt mode to device with SLI-4 interface spec. 
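 *
 * (Also clears, via irq_set_affinity_hint(..., NULL), the affinity hints
 * installed by lpfc_sli4_set_affinity() above.)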
 **/
static void
lpfc_sli4_disable_msix(struct lpfc_hba *phba)
{
	int index;

	/* Free up MSI-X multi-message vectors */
	for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
		irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
				      vector, NULL);
		free_irq(phba->sli4_hba.msix_entries[index].vector,
			 &phba->sli4_hba.fcp_eq_hdl[index]);
	}
	if (phba->cfg_fof) {
		free_irq(phba->sli4_hba.msix_entries[index].vector,
			 &phba->sli4_hba.fcp_eq_hdl[index]);
	}
	/* Disable MSI-X */
	pci_disable_msix(phba->pcidev);

	return;
}

/**
 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
 * to enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
 *
 * Return codes
 * 0 - successful
 * other values - error
 **/
static int
lpfc_sli4_enable_msi(struct lpfc_hba *phba)
{
	int rc, index;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0487 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0488 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0490 MSI request_irq failed (%d)\n", rc);
		return rc;
	}

	for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
	}

	if (phba->cfg_fof) {
		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
	}
	return 0;
}

/**
 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the MSI interrupt mode to device with
 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it has
 * done request_irq() on before calling pci_disable_msi(). Failure to do so
 * results in a BUG_ON() and leaves the device with MSI enabled, leaking
 * its vector.
 **/
static void
lpfc_sli4_disable_msi(struct lpfc_hba *phba)
{
	free_irq(phba->pcidev->irq, phba);
	pci_disable_msi(phba->pcidev);
	return;
}

/**
 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
 * interface spec. Depending on the interrupt mode configured to the driver,
 * the driver will try to fall back from the configured interrupt mode to an
 * interrupt mode which is supported by the platform, kernel, and device, in
 * the order of:
 * MSI-X -> MSI -> IRQ.
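 *
 * A minimal caller-side sketch (mirroring how the probe path in this file
 * consumes the result; not additional driver code):
 *
 *	intr_mode = lpfc_sli4_enable_intr(phba, phba->cfg_use_msi);
 *	if (intr_mode == LPFC_INTR_ERROR)
 *		return -ENODEV;
 *	phba->intr_mode = intr_mode;	(0 = INTx, 1 = MSI, 2 = MSI-X)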
 *
 * Return codes
 * 0 - successful
 * other values - error
 **/
static uint32_t
lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval, index;

	if (cfg_mode == 2) {
		/* Preparation before conf_msi mbox cmd */
		retval = 0;
		if (!retval) {
			/* Now, try to enable MSI-X interrupt mode */
			retval = lpfc_sli4_enable_msix(phba);
			if (!retval) {
				/* Indicate initialization to MSI-X mode */
				phba->intr_type = MSIX;
				intr_mode = 2;
			}
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli4_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
			for (index = 0; index < phba->cfg_fcp_io_channel;
			     index++) {
				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
				atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
					   fcp_eq_in_use, 1);
			}
			if (phba->cfg_fof) {
				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
				atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
					   fcp_eq_in_use, 1);
			}
		}
	}
	return intr_mode;
}

/**
 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate
 * the driver's interrupt handler(s) from interrupt vector(s) to device
 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
 * will release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli4_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX)
		lpfc_sli4_disable_msix(phba);
	else if (phba->intr_type == MSI)
		lpfc_sli4_disable_msi(phba);
	else if (phba->intr_type == INTx)
		free_irq(phba->pcidev->irq, phba);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;

	return;
}

/**
 * lpfc_unset_hba - Unset SLI3 hba device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps to
 * a device with SLI-3 interface spec.
 **/
static void
lpfc_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);

	phba->pport->work_port_events = 0;

	lpfc_sli_hba_down(phba);

	lpfc_sli_brdrestart(phba);

	lpfc_sli_disable_intr(phba);

	return;
}

/**
 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to wait for completion
 * of the device's XRI exchange busy. It checks the XRI exchange busy
 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
 * that, it checks the XRI exchange busy on outstanding FCP and ELS
 * I/Os every 30 seconds, logs an error message, and waits indefinitely.
 * Only when all XRI exchange busy conditions have cleared does the driver
 * unload proceed with invoking the function reset ioctl mailbox command
 * to the CNA and the rest of the driver unload resource release.
 **/
static void
lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
{
	int wait_time = 0;
	int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	while (!fcp_xri_cmpl || !els_xri_cmpl) {
		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
			if (!fcp_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"2877 FCP XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			if (!els_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"2878 ELS XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
		} else {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
		}
		fcp_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
		els_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
	}
}

/**
 * lpfc_sli4_hba_unset - Unset the fcoe hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to reset the HBA's FCoE
 * function. The caller is not required to hold any lock. This routine
 * issues the PCI function reset mailbox command to reset the FCoE function.
 * At the end of the function, it calls the lpfc_hba_down_post function to
 * free any pending commands.
 **/
static void
lpfc_sli4_hba_unset(struct lpfc_hba *phba)
{
	int wait_cnt = 0;
	LPFC_MBOXQ_t *mboxq;
	struct pci_dev *pdev = phba->pcidev;

	lpfc_stop_hba_timers(phba);
	phba->sli4_hba.intr_enable = 0;

	/*
	 * Gracefully wait out the potential current outstanding asynchronous
	 * mailbox command.
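	 * The pattern below is: (1) block any new asynchronous mailbox
	 * command from being posted, (2) poll the active flag for up to
	 * LPFC_ACTIVE_MBOX_WAIT_CNT iterations of 10ms each, and (3) if a
	 * command is still active, force-complete it with MBX_NOT_FINISHED
	 * under the hba lock.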
	 */

	/* First, block any pending async mailbox command from being posted */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);
	/* Now, trying to wait it out if we can */
	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		msleep(10);
		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
			break;
	}
	/* Forcefully release the outstanding mailbox command if timed out */
	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_lock_irq(&phba->hbalock);
		mboxq = phba->sli.mbox_active;
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
		spin_unlock_irq(&phba->hbalock);
	}

	/* Abort all iocbs associated with the hba */
	lpfc_sli_hba_iocb_abort(phba);

	/* Wait for completion of device XRI exchange busy */
	lpfc_sli4_xri_exchange_busy_wait(phba);

	/* Disable PCI subsystem interrupt */
	lpfc_sli4_disable_intr(phba);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);

	/* Reset SLI4 HBA FCoE function */
	lpfc_pci_function_reset(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Stop the SLI4 device port */
	phba->pport->work_port_events = 0;
}

/**
 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion. The expectation is that this routine is called
 * typically from probe_one or from the online routine.
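 *
 * A hedged usage sketch (mailbox allocation follows the pattern used
 * elsewhere in this file; this is not a verbatim caller):
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mboxq)
 *		return -ENOMEM;
 *	if (lpfc_pc_sli4_params_get(phba, mboxq))
 *		... fall back to default port parameters ...
 *	mempool_free(mboxq, phba->mbox_mem_pool);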
 **/
int
lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;

	rc = 0;
	mqe = &mboxq->u.mqe;

	/* Read the port's SLI4 Parameters port capabilities */
	lpfc_pc_sli4_params(mboxq);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}

	if (unlikely(rc))
		return 1;

	sli4_params = &phba->sli4_hba.pc_sli4_params;
	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
					     &mqe->un.sli4_params);
	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
					     &mqe->un.sli4_params);
	sli4_params->proto_types = mqe->un.sli4_params.word3;
	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	return rc;
}

/**
 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion. The expectation is that this routine is called
 * typically from probe_one or from the online routine.
 **/
int
lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe = &mboxq->u.mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;
	int length;
	struct lpfc_sli4_parameters *mbx_sli4_parameters;

	/*
	 * By default, the driver assumes the SLI4 port requires RPI
	 * header postings. The SLI4_PARAM response will correct this
	 * assumption.
	 */
	phba->sli4_hba.rpi_hdrs_in_use = 1;

	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}
	if (unlikely(rc))
		return rc;
	sli4_params = &phba->sli4_hba.pc_sli4_params;
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
					     mbx_sli4_parameters);
	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
					     mbx_sli4_parameters);
	if (bf_get(cfg_phwq, mbx_sli4_parameters))
		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
	else
		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
	sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
	sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
					    mbx_sli4_parameters);
	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
					   mbx_sli4_parameters);
	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	return 0;
}

/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be called to attach a device with SLI-3 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * presented on the PCI bus, the kernel PCI subsystem looks at the PCI
 * device-specific information of the device to see whether the driver can
 * support this kind of device. If the match is successful, the driver core
 * invokes this routine. If this routine determines it can claim the HBA, it
 * does all the initialization that it needs to do to handle the HBA properly.
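 *
 * For orientation, a sketch of how a probe routine like this is typically
 * wired into the PCI core (struct pci_driver is the kernel type; the
 * dispatch wrapper names are assumptions here, not definitions from this
 * file):
 *
 *	static struct pci_driver lpfc_driver = {
 *		.name   = LPFC_DRIVER_NAME,
 *		.probe  = lpfc_pci_probe_one,
 *		.remove = lpfc_pci_remove_one,
 *	};
 *	pci_register_driver(&lpfc_driver);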
 *
 * Return code
 * 0 - driver can claim the device
 * negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-3 specific device PCI memory space */
	error = lpfc_sli_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1402 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up phase-1 common device driver resources */
	error = lpfc_setup_driver_resource_phase1(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1403 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Set up SLI-3 specific device driver resources */
	error = lpfc_sli_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1404 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Initialize and populate the iocb list per host */
	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1405 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s3;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1406 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1407 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1476 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0431 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* SLI-3 HBA setup */
		if (lpfc_sli_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1477 Failed to set up hba\n");
			error = -ENODEV;
			goto out_remove_device;
		}

		/* Wait 50ms for the interrupts of previous mailbox commands */
		msleep(50);
		/* Check active interrupts on message signaled interrupts */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0447 Configure interrupt mode (%d) "
					"failed active interrupt test.\n",
					intr_mode);
			/* Disable the current interrupt mode */
			lpfc_sli_disable_intr(phba);
			/* Try next level of interrupt mode */
			cfg_mode = --intr_mode;
		}
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_remove_device:
	lpfc_unset_hba(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
	lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
	lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}

/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 * @pdev: pointer to PCI device
 *
 * This routine is to be called to detach a device with SLI-3 interface
 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec
 * is removed from the PCI bus, it performs all the necessary cleanup for the
 * HBA device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */

	/* HBA interrupt will be disabled after this call */
	lpfc_sli_hba_down(phba);
	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);
	/* Final cleanup of txcmplq and reset the HBA */
	lpfc_sli_brdrestart(phba);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	lpfc_debugfs_terminate(vport);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_mem_free_all(phba);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	lpfc_hba_free(phba);

	pci_release_selected_regions(pdev, bars);
	pci_disable_device(pdev);
}

/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When
 * PM invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off the device's interrupt and DMA,
 * and bringing the device offline. Note that as the driver implements the
 * minimum PM requirements to a power-aware driver's PM support for
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
 * driver will fully reinitialize its device during the resume() method
 * call -- the driver will set the device to PCI_D3hot state in PCI config
 * space instead of setting it according to the @msg provided by the PM.
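 *
 * Sketch of the legacy PM hook pairing this behavior implies (field names
 * from struct pci_driver; the wrapper names are assumptions):
 *
 *	.suspend = lpfc_pci_suspend_one,	(all PM messages -> SUSPEND)
 *	.resume  = lpfc_pci_resume_one,		(full reinitialization)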
 *
 * Return code
 * 0 - driver suspended the device
 * Error otherwise
 **/
static int
lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0473 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli_disable_intr(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that as the
 * driver implements the minimum PM requirements to a power-aware driver's
 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
 * driver will fully reinitialize its device during the resume() method
 * call -- the device will be set to PCI_D0 directly in PCI config space
 * before restoring the state.
 *
 * Return code
 * 0 - driver resumed the device
 * Error otherwise
 **/
static int
lpfc_pci_resume_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter.
	 */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
				"lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0434 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0430 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2723 PCI channel I/O abort preparing for recovery\n");

	/*
	 * There may be errored I/Os through the HBA; abort all I/Os on the
	 * txcmplq and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}

/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2710 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_fcp_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli_disable_intr(phba);
	pci_disable_device(phba->pcidev);
}

/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2711 PCI channel permanent disable for failure\n");
	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);
}

/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling to
 * device with SLI-3 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it will need to stop all the I/Os and
 * interrupt(s) to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
 * as desired.
 *
 * Return codes
 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0472 Unknown PCI error state: x%x\n", state);
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}

/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to
 * device with SLI-3 interface spec. This is called after the PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method
 * to recover the device. This function will initialize the HBA device,
 * enable the interrupt, but it will just put the HBA to offline state
 * without passing any I/O traffic.
 *
 * Return codes
 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 */
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline, it will perform cleanup */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-3 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 */
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Bring device online, it will be no-op for non-fatal error resume */
	lpfc_online(phba);

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}

/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * returns the number of ELS/CT IOCBs to reserve
 **/
int
lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (max_xri <= 100)
			return 10;
		else if (max_xri <= 256)
			return 25;
		else if (max_xri <= 512)
			return 50;
		else if (max_xri <= 1024)
			return 100;
		else if (max_xri <= 1536)
			return 150;
		else if (max_xri <= 2048)
			return 200;
		else
			return 250;
	} else
		return 0;
}

/**
 * lpfc_write_firmware - attempt to write a firmware image to the port
 * @fw: pointer to firmware image returned from request_firmware.
 * @phba: pointer to lpfc hba data structure.
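 *
 * The image header is validated against the lpfc_grp_hdr magic number,
 * file type, id, and size fields; the image is then streamed to the port
 * in SLI4_PAGE_SIZE DMA chunks via lpfc_wr_object() until the write
 * offset reaches fw->size.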
 *
 **/
static void
lpfc_write_firmware(const struct firmware *fw, void *context)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)context;
	char fwrev[FW_REV_STR_SIZE];
	struct lpfc_grp_hdr *image;
	struct list_head dma_buffer_list;
	int i, rc = 0;
	struct lpfc_dmabuf *dmabuf, *next;
	uint32_t offset = 0, temp_offset = 0;

	/* It can be null in no-wait mode, sanity check */
	if (!fw) {
		rc = -ENXIO;
		goto out;
	}
	image = (struct lpfc_grp_hdr *)fw->data;

	INIT_LIST_HEAD(&dma_buffer_list);
	if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
	    (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
	     LPFC_FILE_TYPE_GROUP) ||
	    (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
	    (be32_to_cpu(image->size) != fw->size)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3022 Invalid FW image found. "
				"Magic:%x Type:%x ID:%x\n",
				be32_to_cpu(image->magic_number),
				bf_get_be32(lpfc_grp_hdr_file_type, image),
				bf_get_be32(lpfc_grp_hdr_id, image));
		rc = -EINVAL;
		goto release_out;
	}
	lpfc_decode_firmware_rev(phba, fwrev, 1);
	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3023 Updating Firmware, Current Version:%s "
				"New Version:%s\n",
				fwrev, image->revision);
		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
					 GFP_KERNEL);
			if (!dmabuf) {
				rc = -ENOMEM;
				goto release_out;
			}
			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
							  SLI4_PAGE_SIZE,
							  &dmabuf->phys,
							  GFP_KERNEL);
			if (!dmabuf->virt) {
				kfree(dmabuf);
				rc = -ENOMEM;
				goto release_out;
			}
			list_add_tail(&dmabuf->list, &dma_buffer_list);
		}
		while (offset < fw->size) {
			temp_offset = offset;
			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
					memcpy(dmabuf->virt,
					       fw->data + temp_offset,
					       fw->size - temp_offset);
					temp_offset = fw->size;
					break;
				}
				memcpy(dmabuf->virt, fw->data + temp_offset,
				       SLI4_PAGE_SIZE);
				temp_offset += SLI4_PAGE_SIZE;
			}
			rc = lpfc_wr_object(phba, &dma_buffer_list,
					    (fw->size - offset), &offset);
			if (rc)
				goto release_out;
		}
		rc = offset;
	}

release_out:
	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
		list_del(&dmabuf->list);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	release_firmware(fw);
out:
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"3024 Firmware update done: %d.\n", rc);
	return;
}

/**
 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to perform a Linux generic firmware upgrade on
 * devices that support this feature.
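 *
 * A hedged usage sketch (both modes appear in this file):
 *
 *	(asynchronous, from the probe path)
 *	lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
 *
 *	(synchronous; the firmware is fetched and written before return)
 *	ret = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE);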
 **/
int
lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
{
	uint8_t file_name[ELX_MODEL_NAME_SIZE];
	int ret;
	const struct firmware *fw;

	/* Only supported on SLI4 interface type 2 for now */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_2)
		return -EPERM;

	snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);

	if (fw_upgrade == INT_FW_UPGRADE) {
		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
					      file_name, &phba->pcidev->dev,
					      GFP_KERNEL, (void *)phba,
					      lpfc_write_firmware);
	} else if (fw_upgrade == RUN_FW_UPGRADE) {
		ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
		if (!ret)
			lpfc_write_firmware(fw, (void *)phba);
	} else {
		ret = -EINVAL;
	}

	return ret;
}

/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is called from the kernel's PCI subsystem to attach a device
 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * presented on the PCI bus, the kernel PCI subsystem looks at the PCI
 * device-specific information of the device to see whether the driver can
 * support this kind of device. If the match is successful, the driver
 * core invokes this routine. If this routine determines it can claim the HBA,
 * it does all the initialization that it needs to do to handle the HBA
 * properly.
 *
 * Return code
 * 0 - driver can claim the device
 * negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host *shost = NULL;
	int error, ret;
	uint32_t cfg_mode, intr_mode;
	int adjusted_fcp_io_channel;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-4 specific device PCI memory space */
	error = lpfc_sli4_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1410 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up phase-1 common device driver resources */
	error = lpfc_setup_driver_resource_phase1(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1411 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	/* Set up SLI-4 specific device driver resources */
	error = lpfc_sli4_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1412 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	/* Initialize and populate the iocb list per host */

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10216 "2821 initialize iocb list %d.\n", 10217 phba->cfg_iocb_cnt*1024); 10218 error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024); 10219 10220 if (error) { 10221 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10222 "1413 Failed to initialize iocb list.\n"); 10223 goto out_unset_driver_resource_s4; 10224 } 10225 10226 INIT_LIST_HEAD(&phba->active_rrq_list); 10227 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 10228 10229 /* Set up common device driver resources */ 10230 error = lpfc_setup_driver_resource_phase2(phba); 10231 if (error) { 10232 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10233 "1414 Failed to set up driver resource.\n"); 10234 goto out_free_iocb_list; 10235 } 10236 10237 /* Get the default values for Model Name and Description */ 10238 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 10239 10240 /* Create SCSI host to the physical port */ 10241 error = lpfc_create_shost(phba); 10242 if (error) { 10243 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10244 "1415 Failed to create scsi host.\n"); 10245 goto out_unset_driver_resource; 10246 } 10247 10248 /* Configure sysfs attributes */ 10249 vport = phba->pport; 10250 error = lpfc_alloc_sysfs_attr(vport); 10251 if (error) { 10252 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10253 "1416 Failed to allocate sysfs attr\n"); 10254 goto out_destroy_shost; 10255 } 10256 10257 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 10258 /* Now, trying to enable interrupt and bring up the device */ 10259 cfg_mode = phba->cfg_use_msi; 10260 10261 /* Put device to a known state before enabling interrupt */ 10262 lpfc_stop_port(phba); 10263 /* Configure and enable interrupt */ 10264 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 10265 if (intr_mode == LPFC_INTR_ERROR) { 10266 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10267 "0426 Failed to enable interrupt.\n"); 10268 error = -ENODEV; 10269 goto out_free_sysfs_attr; 10270 } 10271 /* Default to single EQ for non-MSI-X */ 10272 if (phba->intr_type != MSIX) 10273 adjusted_fcp_io_channel = 1; 10274 else 10275 adjusted_fcp_io_channel = phba->cfg_fcp_io_channel; 10276 phba->cfg_fcp_io_channel = adjusted_fcp_io_channel; 10277 /* Set up SLI-4 HBA */ 10278 if (lpfc_sli4_hba_setup(phba)) { 10279 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10280 "1421 Failed to set up hba\n"); 10281 error = -ENODEV; 10282 goto out_disable_intr; 10283 } 10284 10285 /* Log the current active interrupt mode */ 10286 phba->intr_mode = intr_mode; 10287 lpfc_log_intr_mode(phba, intr_mode); 10288 10289 /* Perform post initialization setup */ 10290 lpfc_post_init_setup(phba); 10291 10292 /* check for firmware upgrade or downgrade */ 10293 if (phba->cfg_request_firmware_upgrade) 10294 ret = lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); 10295 10296 /* Check if there are static vports to be created. 
	 */
	lpfc_create_static_vport(phba);
	return 0;

out_disable_intr:
	lpfc_sli4_disable_intr(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s4:
	lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
	lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}

/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to detach a device
 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * removed from the PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	/* Free the HBA sysfs attributes */
	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Perform cleanup on the physical port */
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);
	lpfc_sli4_hba_unset(phba);

	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	/* Perform scsi free before driver resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_scsi_free(phba);

	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);

	return;
}

/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it quiesces the device by stopping the driver's worker
 * thread for the device, turning off the device's interrupt and DMA, and
 * bringing the device offline. Note that as the driver implements the
 * minimum PM requirements to a power-aware driver's PM support for
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
 * driver will fully reinitialize its device during the resume() method
 * call -- the driver will set the device to PCI_D3hot state in PCI config
 * space instead of setting it according to the @msg provided by the PM.
 *
 * Return code
 * 0 - driver suspended the device
 * Error otherwise
 **/
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it restores the device's PCI config space state and fully
 * reinitializes the device and brings it online. Note that as the driver
 * implements the minimum PM requirements to a power-aware driver's PM for
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver
 * will fully reinitialize its device during the resume() method call -- the
 * device will be set to PCI_D0 directly in PCI config space before restoring
 * the state.
 *
 * Return code
 * 0 - driver resumed the device
 * Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					"lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2828 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through the HBA; abort all I/Os on the
	 * txcmplq and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}

/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recover.
 * It aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2828 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through the HBA, abort all I/Os on the
	 * txcmplq and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}

/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2826 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding SCSI I/Os as we are about to reset */
	lpfc_sli_flush_fcp_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);
	pci_disable_device(phba->pcidev);
}

/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for the PCI slot being
 * permanently disabled. It blocks the SCSI transport layer traffic and
 * flushes the FCP pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);
}
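/*
 * Note on the three preparation routines above: they form an escalation
 * ladder keyed to the PCI channel state.  Recovery (non-fatal) only aborts
 * outstanding FCP I/O so the midlayer can retry it; reset additionally
 * blocks management and SCSI traffic, flushes the FCP rings, stops the HBA
 * timers, and tears down interrupts and queues before disabling the PCI
 * function; permanent failure blocks and flushes but leaves the PCI
 * function alone, since the slot is being taken away entirely.
 */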
/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with the SLI-4 interface spec. It is called by the PCI subsystem
 * after a PCI bus error affecting this device has been detected. When this
 * function is invoked, it will need to stop all the I/Os and interrupt(s)
 * to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper
 * recovery as desired.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli4_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli4_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2825 Unknown PCI error state: x%x\n", state);
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
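/*
 * Illustrative sketch (not part of the driver): roughly how the PCI error
 * recovery core consumes the return value above.  This is a simplified,
 * pseudocode rendering of the flow described in
 * Documentation/PCI/pci-error-recovery.txt, not actual kernel code:
 */
#if 0
switch (driver->err_handler->error_detected(pdev, state)) {
case PCI_ERS_RESULT_CAN_RECOVER:
	/* link still usable: core may re-enable MMIO, then call resume() */
	break;
case PCI_ERS_RESULT_NEED_RESET:
	/* core resets the slot, then calls slot_reset(); if that returns
	 * PCI_ERS_RESULT_RECOVERED, it finishes with resume() */
	break;
case PCI_ERS_RESULT_DISCONNECT:
	/* device is considered lost; no further recovery callbacks */
	break;
}
#endif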
/**
 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with the SLI-4 interface spec. It is called after the PCI bus has
 * been reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method to
 * recover the device. This function will initialize the HBA device and
 * enable the interrupt, but it will just put the HBA into an offline state
 * without passing any I/O traffic.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * the device saved_state flag, we need to save the restored state
	 * again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with the SLI-4 interface spec. It is called when the kernel error
 * recovery tells the lpfc driver that it is OK to resume normal PCI
 * operation after PCI bus error recovery. After this call, traffic can
 * start to flow from this device again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/*
	 * In case of slot reset, as function reset is performed through
	 * a mailbox command which needs DMA to be enabled, this operation
	 * has to be moved to the io resume phase. Taking the device offline
	 * will perform the necessary cleanup.
	 */
	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}
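/*
 * Note on the slot-reset/io-resume handshake above: lpfc_io_slot_reset_s4()
 * clears LPFC_SLI_ACTIVE under hbalock but deliberately does not restart
 * the HBA, because the function reset is driven by a mailbox command that
 * needs DMA to be enabled again first.  lpfc_io_resume_s4() tests that flag
 * and, when it is clear, performs the offline/restart/online sequence once
 * normal PCI operation has fully resumed.
 */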
/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
 * looks at the PCI device-specific information of the device to see if the
 * driver can support this kind of device. If the match is successful, the
 * driver core invokes this routine. This routine dispatches the action to
 * the proper SLI-3 or SLI-4 device probing routine, which will do all the
 * initialization that it needs to do to handle the HBA device properly.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver cannot claim the device
 **/
static int
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	int rc;
	struct lpfc_sli_intf intf;

	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
		return -ENODEV;

	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
		rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);

	return rc;
}

/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from the PCI bus, the driver core invokes this
 * routine. This routine dispatches the action to the proper SLI-3 or SLI-4
 * device remove routine, which will perform all the necessary cleanup for
 * the device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(pdev, msg);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(pdev, msg);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
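/*
 * Design note (sketch, not the driver's code): the wrappers above and
 * below all dispatch on phba->pci_dev_grp with an explicit switch.  An
 * alternative shape would be a per-group ops table, e.g.:
 */
#if 0
struct lpfc_pci_grp_ops {			/* hypothetical */
	int (*suspend)(struct pci_dev *, pm_message_t);
	int (*resume)(struct pci_dev *);
};

static const struct lpfc_pci_grp_ops lpfc_grp_ops[] = {
	[LPFC_PCI_DEV_LP] = { lpfc_pci_suspend_one_s3, lpfc_pci_resume_one_s3 },
	[LPFC_PCI_DEV_OC] = { lpfc_pci_suspend_one_s4, lpfc_pci_resume_one_s4 },
};
/* ... rc = lpfc_grp_ops[phba->pci_dev_grp].suspend(pdev, msg); ... */
#endif
/*
 * The explicit switch keeps the invalid-group log message and avoids an
 * out-of-bounds index on a corrupted pci_dev_grp, which is presumably why
 * the driver is written the way it is.
 */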
/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after the PCI bus has been reset to restart the PCI
 * card from scratch, as if from a cold-boot. When this routine is invoked,
 * it dispatches the action to the proper SLI-3 or SLI-4 device reset
 * handling routine, which will perform the proper device reset.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when the kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

/**
 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks to see if OAS is supported by this adapter. If
 * supported, the configure Flash Optimized Fabric flag is set. Otherwise,
 * the enable oas flag is cleared and the pool created for OAS device data
 * is destroyed.
 **/
void
lpfc_sli4_oas_verify(struct lpfc_hba *phba)
{

	if (!phba->cfg_EnableXLane)
		return;

	if (phba->sli4_hba.pc_sli4_params.oas_supported) {
		phba->cfg_fof = 1;
	} else {
		phba->cfg_fof = 0;
		if (phba->device_data_mem_pool)
			mempool_destroy(phba->device_data_mem_pool);
		phba->device_data_mem_pool = NULL;
	}

	return;
}
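/*
 * Illustrative sketch (not part of the driver): lpfc_sli4_oas_verify()
 * above destroys device_data_mem_pool when OAS turns out to be
 * unsupported.  The create/destroy pairing for such a pool, with
 * hypothetical element type and sizing, would look like:
 */
#if 0
struct my_device_data { int dummy; };		/* hypothetical element */

mempool_t *pool = mempool_create_kmalloc_pool(16,	/* min reserved */
					sizeof(struct my_device_data));
if (pool) {
	void *elem = mempool_alloc(pool, GFP_KERNEL);	/* never fails if
							 * reserve is intact */
	mempool_free(elem, pool);
	mempool_destroy(pool);		/* pool must not be NULL here, hence
					 * the NULL check in the code above */
}
#endif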
/**
 * lpfc_fof_queue_setup - Set up all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the fof queues for the FC HBA
 * operation.
 *
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - No available memory
 **/
int
lpfc_fof_queue_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	int rc;

	rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
	if (rc)
		return -ENOMEM;

	if (phba->cfg_fof) {

		rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
				    phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
		if (rc)
			goto out_oas_cq;

		rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
				    phba->sli4_hba.oas_cq, LPFC_FCP);
		if (rc)
			goto out_oas_wq;

		phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING];
		phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING];
	}

	return 0;

out_oas_wq:
	lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
out_oas_cq:
	lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
	return rc;

}

/**
 * lpfc_fof_queue_create - Create all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the fof queues for the FC HBA
 * operation. For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) shall be taken from the module parameter. For now,
 * we just use some constant number as a placeholder.
 *
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - No available memory
 * 	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_fof_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;

	/* Create FOF EQ */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
				      phba->sli4_hba.eq_ecount);
	if (!qdesc)
		goto out_error;

	phba->sli4_hba.fof_eq = qdesc;

	if (phba->cfg_fof) {

		/* Create OAS CQ */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount);
		if (!qdesc)
			goto out_error;

		phba->sli4_hba.oas_cq = qdesc;

		/* Create OAS WQ */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount);
		if (!qdesc)
			goto out_error;

		phba->sli4_hba.oas_wq = qdesc;

	}
	return 0;

out_error:
	lpfc_fof_queue_destroy(phba);
	return -ENOMEM;
}
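/*
 * Note on error handling in the two routines above: both use the kernel's
 * goto-unwind idiom.  lpfc_fof_queue_setup() in particular relies on label
 * fall-through -- a failed WQ create jumps to out_oas_wq, destroys the CQ,
 * and then falls through to out_oas_cq to destroy the EQ -- so the labels
 * are ordered in reverse of the creation order.  A minimal rendering of
 * the pattern with hypothetical create_*/destroy_* helpers:
 */
#if 0
int my_setup(void)
{
	int rc;

	rc = create_a();
	if (rc)
		return rc;
	rc = create_b();
	if (rc)
		goto out_a;
	rc = create_c();
	if (rc)
		goto out_b;
	return 0;

out_b:
	destroy_b();	/* falls through: undo in reverse order */
out_a:
	destroy_a();
	return rc;
}
#endif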
/**
 * lpfc_fof_queue_destroy - Destroy all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues created for the
 * FC HBA operation.
 *
 * Return codes
 * 	0 - successful
 **/
int
lpfc_fof_queue_destroy(struct lpfc_hba *phba)
{
	/* Release FOF Event queue */
	if (phba->sli4_hba.fof_eq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
		phba->sli4_hba.fof_eq = NULL;
	}

	/* Release OAS Completion queue */
	if (phba->sli4_hba.oas_cq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
		phba->sli4_hba.oas_cq = NULL;
	}

	/* Release OAS Work queue */
	if (phba->sli4_hba.oas_wq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
		phba->sli4_hba.oas_wq = NULL;
	}
	return 0;
}
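/*
 * Note on the ID table below: each entry spells out the vendor, device,
 * subvendor, and subdevice fields explicitly.  The PCI_DEVICE() helper
 * from <linux/pci.h> expands to the same thing with PCI_ANY_ID for the
 * subsystem fields, so an equivalent (sketch) entry would be:
 */
#if 0
{ PCI_DEVICE(PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER) },
#endif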
static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static const struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= lpfc_pci_remove_one,
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler	= &lpfc_err_handler,
};
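/*
 * Design note (sketch, not the driver's code): lpfc_driver above uses the
 * legacy pci_driver .suspend/.resume hooks.  On kernels where the generic
 * dev_pm_ops interface is preferred, the same pair could be wired up
 * through wrappers taking a struct device, e.g.:
 */
#if 0
static int lpfc_pm_suspend(struct device *dev)		/* hypothetical */
{
	return lpfc_pci_suspend_one(to_pci_dev(dev), PMSG_SUSPEND);
}

static int lpfc_pm_resume(struct device *dev)		/* hypothetical */
{
	return lpfc_pci_resume_one(to_pci_dev(dev));
}

static SIMPLE_DEV_PM_OPS(lpfc_pm_ops, lpfc_pm_suspend, lpfc_pm_resume);
/* then set .driver.pm = &lpfc_pm_ops in struct pci_driver */
#endif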
static const struct file_operations lpfc_mgmt_fop = {
	.owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};

/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as the lpfc module entry point.
 *
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - FC attach transport failed
 * 	all others - failed
 */
static int __init
lpfc_init(void)
{
	int cpu;
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	error = misc_register(&lpfc_mgmt_dev);
	if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
			"misc_register returned with status %d", error);

	if (lpfc_enable_npiv) {
		lpfc_transport_functions.vport_create = lpfc_vport_create;
		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	}
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	if (lpfc_enable_npiv) {
		lpfc_vport_transport_template =
			fc_attach_transport(&lpfc_vport_transport_functions);
		if (lpfc_vport_transport_template == NULL) {
			fc_release_transport(lpfc_transport_template);
			return -ENOMEM;
		}
	}

	/* Initialize in case vector mapping is needed */
	lpfc_used_cpu = NULL;
	lpfc_present_cpu = 0;
	for_each_present_cpu(cpu)
		lpfc_present_cpu++;

	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		if (lpfc_enable_npiv)
			fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}

/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as the lpfc module exit point.
 */
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	if (lpfc_enable_npiv)
		fc_release_transport(lpfc_vport_transport_template);
	if (_dump_buf_data) {
		printk(KERN_ERR	"9062 BLKGRD: freeing %lu pages for "
				"_dump_buf_data at 0x%p\n",
				(1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR	"9049 BLKGRD: freeing %lu pages for "
				"_dump_buf_dif at 0x%p\n",
				(1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
	kfree(lpfc_used_cpu);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);