/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/sched/clock.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;
static bool lpfc_pldv_detect;

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);
static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254
static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);
static void lpfc_cgn_update_tstamp(struct lpfc_hba *phba, struct lpfc_cgn_ts *ts);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof(uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char *)mb->un.varRDnvp.rsvd3, 0,
		       sizeof(mb->un.varRDnvp.rsvd3));
		memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
		       sizeof(licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/*
	 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
	 * which was already set in lpfc_get_cfgparam()
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}


	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *)mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *)mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
		       sizeof(phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;

		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);

	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
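
/*
 * Illustration of the VPD dump loop above (hypothetical sizes, not taken
 * from the source): with a DMP_VPD_SIZE of 1024 bytes and the adapter
 * returning 256 bytes per DUMP mailbox, the do/while loop issues DUMP_MEM
 * at offsets 0, 256, 512 and 768, copying each chunk into lpfc_vpd_data
 * before handing the assembled buffer to lpfc_parse_vpd(). A zero word_cnt
 * or a mailbox error ends the loop early with whatever VPD was gathered.
 */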

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's configure asynchronous
 * event mailbox command to the device. If the mailbox command returns
 * successfully, it will set the internal temperature sensor support flag to
 * 1; otherwise, it will set the flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command for getting
 * wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}
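
/*
 * Worked example of the decoding above (hypothetical field values, for
 * illustration only): with prg->ver = 10, prg->rev = 2, prg->lev = 1,
 * prg->dist = 1 and prg->num = 3, dist_char[1] is 'a' and OptionROMVersion
 * becomes "10.21a3". With prg->dist = 3 and prg->num = 0 the dist/num
 * suffix is dropped, yielding just "10.21".
 */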

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename and fc_portname
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
		       sizeof(struct lpfc_name));

	/*
	 * If the port name has changed, then set the Param changes flag
	 * to unreg the login
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
	    memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
		   sizeof(struct lpfc_name))) {
		vport->vport_flag |= FAWWPN_PARAM_CHG;

		if (phba->sli_rev == LPFC_SLI_REV4 &&
		    vport->port_type == LPFC_PHYSICAL_PORT &&
		    phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) {
			if (!(phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG))
				phba->sli4_hba.fawwpn_flag &=
						~LPFC_FAWWPN_FABRIC;
			lpfc_printf_log(phba, KERN_INFO,
					LOG_SLI | LOG_DISCOVERY | LOG_ELS,
					"2701 FA-PWWN change WWPN from %llx to "
					"%llx: vflag x%x fawwpn_flag x%x\n",
					wwn_to_u64(vport->fc_portname.u.wwn),
					wwn_to_u64
					   (vport->fc_sparam.portName.u.wwn),
					vport->vport_flag,
					phba->sli4_hba.fawwpn_flag);
			memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			       sizeof(struct lpfc_name));
		}
	}

	if (vport->fc_portname.u.wwn[0] == 0)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
		       sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

	/* This dmabuf was allocated by lpfc_read_sparam. The dmabuf is no
	 * longer needed. Prevent unintended ctx_buf access as the mbox is
	 * reused.
	 */
	memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->ctx_buf = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}
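
	/*
	 * Example of the nibble-to-ASCII conversion above (hypothetical
	 * byte, for illustration): an IEEE address byte of 0x3a yields a
	 * high nibble of 0x3 -> 0x30 + 3 = '3' and a low nibble of 0xa ->
	 * 0x61 + (10 - 10) = 'a', so that byte contributes "3a" to the
	 * serial number string.
	 */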
504
505 lpfc_read_config(phba, pmb);
506 pmb->vport = vport;
507 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
508 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
509 "0453 Adapter failed to init, mbxCmd x%x "
510 "READ_CONFIG, mbxStatus x%x\n",
511 mb->mbxCommand, mb->mbxStatus);
512 phba->link_state = LPFC_HBA_ERROR;
513 mempool_free( pmb, phba->mbox_mem_pool);
514 return -EIO;
515 }
516
517 /* Check if the port is disabled */
518 lpfc_sli_read_link_ste(phba);
519
520 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
521 if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
522 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
523 "3359 HBA queue depth changed from %d to %d\n",
524 phba->cfg_hba_queue_depth,
525 mb->un.varRdConfig.max_xri);
526 phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
527 }
528
529 phba->lmt = mb->un.varRdConfig.lmt;
530
531 /* Get the default values for Model Name and Description */
532 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
533
534 phba->link_state = LPFC_LINK_DOWN;
535
536 /* Only process IOCBs on ELS ring till hba_state is READY */
537 if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
538 psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
539 if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
540 psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;
541
542 /* Post receive buffers for desired rings */
543 if (phba->sli_rev != 3)
544 lpfc_post_rcv_buf(phba);
545
546 /*
547 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
548 */
549 if (phba->intr_type == MSIX) {
550 rc = lpfc_config_msi(phba, pmb);
551 if (rc) {
552 mempool_free(pmb, phba->mbox_mem_pool);
553 return -EIO;
554 }
555 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
556 if (rc != MBX_SUCCESS) {
557 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
558 "0352 Config MSI mailbox command "
559 "failed, mbxCmd x%x, mbxStatus x%x\n",
560 pmb->u.mb.mbxCommand,
561 pmb->u.mb.mbxStatus);
562 mempool_free(pmb, phba->mbox_mem_pool);
563 return -EIO;
564 }
565 }
566
567 spin_lock_irq(&phba->hbalock);
568 /* Initialize ERATT handling flag */
569 phba->hba_flag &= ~HBA_ERATT_HANDLED;
570
571 /* Enable appropriate host interrupts */
572 if (lpfc_readl(phba->HCregaddr, &status)) {
573 spin_unlock_irq(&phba->hbalock);
574 return -EIO;
575 }
576 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
577 if (psli->num_rings > 0)
578 status |= HC_R0INT_ENA;
579 if (psli->num_rings > 1)
580 status |= HC_R1INT_ENA;
581 if (psli->num_rings > 2)
582 status |= HC_R2INT_ENA;
583 if (psli->num_rings > 3)
584 status |= HC_R3INT_ENA;
585
586 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
587 (phba->cfg_poll & DISABLE_FCP_RING_INT))
588 status &= ~(HC_R0INT_ENA);
589
590 writel(status, phba->HCregaddr);
591 readl(phba->HCregaddr); /* flush */
592 spin_unlock_irq(&phba->hbalock);
593
594 /* Set up ring-0 (ELS) timer */
595 timeout = phba->fc_ratov * 2;
596 mod_timer(&vport->els_tmofunc,
597 jiffies + msecs_to_jiffies(1000 * timeout));
598 /* Set up heart beat (HB) timer */
599 mod_timer(&phba->hb_tmofunc,
600 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
601 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
602 phba->last_completion_time = jiffies;
603 /* Set up error attention (ERATT) polling timer */
604 mod_timer(&phba->eratt_poll,
605 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
606
607 if (phba->hba_flag & LINK_DISABLED) {
608 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
609 "2598 Adapter Link is disabled.\n");
610 lpfc_down_link(phba, pmb);
611 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
612 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
613 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
614 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
615 "2599 Adapter failed to issue DOWN_LINK"
616 " mbox command rc 0x%x\n", rc);
617
618 mempool_free(pmb, phba->mbox_mem_pool);
619 return -EIO;
620 }
621 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
622 mempool_free(pmb, phba->mbox_mem_pool);
623 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
624 if (rc)
625 return rc;
626 }
627 /* MBOX buffer will be freed in mbox compl */
628 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
629 if (!pmb) {
630 phba->link_state = LPFC_HBA_ERROR;
631 return -ENOMEM;
632 }
633
634 lpfc_config_async(phba, pmb, LPFC_ELS_RING);
635 pmb->mbox_cmpl = lpfc_config_async_cmpl;
636 pmb->vport = phba->pport;
637 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
638
639 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
640 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
641 "0456 Adapter failed to issue "
642 "ASYNCEVT_ENABLE mbox status x%x\n",
643 rc);
644 mempool_free(pmb, phba->mbox_mem_pool);
645 }
646
647 /* Get Option rom version */
648 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
649 if (!pmb) {
650 phba->link_state = LPFC_HBA_ERROR;
651 return -ENOMEM;
652 }
653
654 lpfc_dump_wakeup_param(phba, pmb);
655 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
656 pmb->vport = phba->pport;
657 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
658
659 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
660 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
661 "0435 Adapter failed "
662 "to get Option ROM version status x%x\n", rc);
663 mempool_free(pmb, phba->mbox_mem_pool);
664 }
665
666 return 0;
667 }

/**
 * lpfc_sli4_refresh_params - update driver copy of params.
 * @phba: Pointer to HBA context object.
 *
 * This is called to refresh driver copy of dynamic fields from the
 * common_get_sli4_parameters descriptor.
 *
 * Return codes
 *   0 - success.
 *   Any other value - mailbox error.
 **/
int
lpfc_sli4_refresh_params(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mqe *mqe;
	struct lpfc_sli4_parameters *mbx_sli4_parameters;
	int length, rc;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	mqe = &mboxq->u.mqe;
	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (unlikely(rc)) {
		mempool_free(mboxq, phba->mbox_mem_pool);
		return rc;
	}
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
	phba->sli4_hba.pc_sli4_params.mi_cap =
		bf_get(cfg_mi_ver, mbx_sli4_parameters);

	/* Are we forcing MI off via module parameter? */
	if (phba->cfg_enable_mi)
		phba->sli4_hba.pc_sli4_params.mi_ver =
			bf_get(cfg_mi_ver, mbx_sli4_parameters);
	else
		phba->sli4_hba.pc_sli4_params.mi_ver = 0;

	phba->sli4_hba.pc_sli4_params.cmf =
		bf_get(cfg_cmf, mbx_sli4_parameters);
	phba->sli4_hba.pc_sli4_params.pls =
		bf_get(cfg_pvl, mbx_sli4_parameters);

	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
	     !(phba->lmt & LMT_64Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
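
/*
 * For illustration (hypothetical configuration): if cfg_link_speed is
 * LPFC_USER_LINK_SPEED_16G but the adapter's link-speed mask (phba->lmt)
 * lacks LMT_16Gb, the validation chain above logs message 1302 and falls
 * back to LPFC_USER_LINK_SPEED_AUTO before INIT_LINK is issued.
 */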

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 *	rspiocbs which got deferred
 *
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup completed slow path events after HBA is reset
 * when bringing down the SLI Layer.
 *
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}
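
/*
 * Note on the container_of() usage above: the lpfc_cq_event is embedded in
 * either an lpfc_iocbq or an hbq_dmabuf, so recovering the outer structure
 * from the queued list entry is plain pointer arithmetic. Hypothetical
 * sketch of the same idiom:
 *
 *	struct lpfc_iocbq *iocbq =
 *		container_of(cq_event, struct lpfc_iocbq, cq_event);
 *
 * which is what the CQE_CODE_COMPL_WQE case does before releasing the
 * iocbq back to the driver's pool.
 */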

/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point in time the HBA is either reset or
			 * DOA. Nothing should be on txcmplq as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;


	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */

	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
			    &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_els_sgl_list);


	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);

	/* abts_xxxx_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->hbalock);
	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_io_buf_list,
				 &aborts);

		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->put_io_bufs += qp->abts_nvme_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		qp->abts_nvme_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return cnt;
}
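
/*
 * Lock-ordering note (descriptive, derived from the loop above): hbalock is
 * taken first, then each hardware queue's abts_io_buf_list_lock, and then
 * io_buf_list_put_lock while the aborted buffers are moved back onto the
 * put list. The locks are released in reverse order, keeping the nesting
 * consistent across all hardware queues.
 */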

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = from_timer(phba, t, hb_tmofunc);

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodic operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = from_timer(phba, t, rrq_tmr);
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
	    !(phba->link_state == LPFC_HBA_ERROR) &&
	    !(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}

/*
 * lpfc_idle_stat_delay_work - idle_stat tracking
 *
 * This routine tracks per-eq idle_stat and determines polling decisions.
 *
 * Return codes:
 *   None
 **/
static void
lpfc_idle_stat_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba,
					     idle_stat_delay_work);
	struct lpfc_queue *eq;
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_idle_stat *idle_stat;
	u32 i, idle_percent;
	u64 wall, wall_idle, diff_wall, diff_idle, busy_time;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE ||
	    phba->cmf_active_mode != LPFC_CFG_OFF)
		goto requeue;

	for_each_present_cpu(i) {
		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
		eq = hdwq->hba_eq;

		/* Skip if we've already handled this eq's primary CPU */
		if (eq->chann != i)
			continue;

		idle_stat = &phba->sli4_hba.idle_stat[i];

		/* get_cpu_idle_time returns values as running counters. Thus,
		 * to know the amount for this period, the prior counter values
		 * need to be subtracted from the current counter values.
		 * From there, the idle time stat can be calculated as a
		 * percentage of 100 - the sum of the other consumption times.
		 */
		wall_idle = get_cpu_idle_time(i, &wall, 1);
		diff_idle = wall_idle - idle_stat->prev_idle;
		diff_wall = wall - idle_stat->prev_wall;

		if (diff_wall <= diff_idle)
			busy_time = 0;
		else
			busy_time = diff_wall - diff_idle;

		idle_percent = div64_u64(100 * busy_time, diff_wall);
		idle_percent = 100 - idle_percent;

		if (idle_percent < 15)
			eq->poll_mode = LPFC_QUEUE_WORK;
		else
			eq->poll_mode = LPFC_THREADED_IRQ;

		idle_stat->prev_idle = wall_idle;
		idle_stat->prev_wall = wall;
	}

requeue:
	schedule_delayed_work(&phba->idle_stat_delay_work,
			      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}
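
/*
 * Worked example of the idle calculation above (hypothetical counter
 * values): if diff_wall is 1000 and diff_idle is 900 for the sampling
 * period, busy_time is 100, the div64_u64() yields 10 (percent busy), and
 * idle_percent becomes 100 - 10 = 90. Since 90 is not below 15, the EQ is
 * switched to LPFC_THREADED_IRQ; an idle percentage under 15 would select
 * LPFC_QUEUE_WORK instead.
 */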

static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba, eq_delay_work);
	struct lpfc_eq_intr_info *eqi, *eqi_new;
	struct lpfc_queue *eq, *eq_next;
	unsigned char *ena_delay = NULL;
	uint32_t usdelay;
	int i;

	if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE)
		goto requeue;

	ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
			    GFP_KERNEL);
	if (!ena_delay)
		goto requeue;

	for (i = 0; i < phba->cfg_irq_chann; i++) {
		/* Get the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
		if (!eq)
			continue;
		if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
			eq->q_flag &= ~HBA_EQ_DELAY_CHK;
			ena_delay[eq->last_cpu] = 1;
		}
	}

	for_each_present_cpu(i) {
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
		if (ena_delay[i]) {
			usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
			if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
				usdelay = LPFC_MAX_AUTO_EQ_DELAY;
		} else {
			usdelay = 0;
		}

		eqi->icnt = 0;

		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
			if (unlikely(eq->last_cpu != i)) {
				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
						      eq->last_cpu);
				list_move_tail(&eq->cpu_list, &eqi_new->list);
				continue;
			}
			if (usdelay != eq->q_mode)
				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
							 usdelay);
		}
	}

	kfree(ena_delay);

requeue:
	queue_delayed_work(phba->wq, &phba->eq_delay_work,
			   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
}
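
/*
 * Example of the delay calculation above (hypothetical interrupt count):
 * with eqi->icnt = 4096 for the period, usdelay = (4096 >> 10) *
 * LPFC_EQ_DELAY_STEP = 4 * LPFC_EQ_DELAY_STEP, then capped at
 * LPFC_MAX_AUTO_EQ_DELAY before being programmed via
 * lpfc_modify_hba_eq_delay().
 */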

/**
 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
 * @phba: pointer to lpfc hba data structure.
 *
 * For each heartbeat, this routine applies some heuristics to adjust
 * XRI distribution. The goal is to fully utilize free XRIs.
 **/
static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
{
	u32 i;
	u32 hwq_count;

	hwq_count = phba->cfg_hdw_queue;
	for (i = 0; i < hwq_count; i++) {
		/* Adjust XRIs in private pool */
		lpfc_adjust_pvt_pool_count(phba, i);

		/* Adjust high watermark */
		lpfc_adjust_high_watermark(phba, i);

#ifdef LPFC_MXP_STAT
		/* Snapshot pbl, pvt and busy count */
		lpfc_snapshot_mxp(phba, i);
#endif
	}
}

/**
 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * If a HB mbox is not already in progress, this routine will allocate
 * a LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command,
 * and issue it. The HBA_HBEAT_INP flag means the command is in progress.
 **/
int
lpfc_issue_hb_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int retval;

	/* Is a Heartbeat mbox already in progress */
	if (phba->hba_flag & HBA_HBEAT_INP)
		return 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	lpfc_heart_beat(phba, pmboxq);
	pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
	pmboxq->vport = phba->pport;
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

	if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENXIO;
	}
	phba->hba_flag |= HBA_HBEAT_INP;

	return 0;
}

/**
 * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * The heartbeat timer (every 5 sec) will fire. If the HBA_HBEAT_TMO
 * flag is set, it will force a MBX_HEARTBEAT mbox command, regardless
 * of the value of lpfc_enable_hba_heartbeat.
 * If lpfc_enable_hba_heartbeat is set, the timeout routine will always
 * try to issue a MBX_HEARTBEAT mbox command.
 **/
void
lpfc_issue_hb_tmo(struct lpfc_hba *phba)
{
	if (phba->cfg_enable_hba_heartbeat)
		return;
	phba->hba_flag |= HBA_HBEAT_TMO;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * to offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_dmabuf *buf_ptr;
	int retval = 0;
	int i, tmo;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	if (phba->cfg_xri_rebalancing) {
		/* Multi-XRI pools handler */
		lpfc_hb_mxp_handler(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_change_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	if (phba->elsbuf_cnt &&
	    (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
					 struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		/* If IOs are completing, no need to issue a MBX_HEARTBEAT */
		spin_lock_irq(&phba->pport->work_port_lock);
		if (time_after(phba->last_completion_time +
			       msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
			       jiffies)) {
			spin_unlock_irq(&phba->pport->work_port_lock);
			if (phba->hba_flag & HBA_HBEAT_INP)
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			else
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			goto out;
		}
		spin_unlock_irq(&phba->pport->work_port_lock);

		/* Check if a MBX_HEARTBEAT is already in progress */
		if (phba->hba_flag & HBA_HBEAT_INP) {
			/*
			 * If heart beat timeout called with HBA_HBEAT_INP set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still outstanding: "
					"last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
		} else {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
			    (list_empty(&psli->mboxq))) {

				retval = lpfc_issue_hb_mbox(phba);
				if (retval) {
					tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
					goto out;
				}
				phba->skipped_hb = 0;
			} else if (time_before_eq(phba->last_completion_time,
						  phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
						"2857 Last completion time not "
						"updated in %d ms\n",
						jiffies_to_msecs(jiffies
							 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			goto out;
		}
	} else {
		/* Check to see if we want to force a MBX_HEARTBEAT */
		if (phba->hba_flag & HBA_HBEAT_TMO) {
			retval = lpfc_issue_hb_mbox(phba);
			if (retval)
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			else
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			goto out;
		}
		tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
	}
out:
	mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
}
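
/*
 * Heartbeat timing sketch (using the constants described in the comments
 * above: interval 5 s, timeout 30 s): on each timer pop the handler first
 * checks whether any I/O completed within the last LPFC_HB_MBOX_INTERVAL;
 * if so, no MBX_HEARTBEAT is needed and the timer is simply re-armed. Only
 * when the adapter has been quiet and no mailbox is active does
 * lpfc_issue_hb_mbox() send the heartbeat, after which the timer waits
 * LPFC_HB_MBOX_TIMEOUT for the completion before treating the adapter as
 * unresponsive.
 */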
1625
1626 /**
1627 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
1628 * @phba: pointer to lpfc hba data structure.
1629 *
1630 * This routine is called to bring the HBA offline when HBA hardware error
1631 * other than Port Error 6 has been detected.
1632 **/
1633 static void
lpfc_offline_eratt(struct lpfc_hba * phba)1634 lpfc_offline_eratt(struct lpfc_hba *phba)
1635 {
1636 struct lpfc_sli *psli = &phba->sli;
1637
1638 spin_lock_irq(&phba->hbalock);
1639 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1640 spin_unlock_irq(&phba->hbalock);
1641 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1642
1643 lpfc_offline(phba);
1644 lpfc_reset_barrier(phba);
1645 spin_lock_irq(&phba->hbalock);
1646 lpfc_sli_brdreset(phba);
1647 spin_unlock_irq(&phba->hbalock);
1648 lpfc_hba_down_post(phba);
1649 lpfc_sli_brdready(phba, HS_MBRDY);
1650 lpfc_unblock_mgmt_io(phba);
1651 phba->link_state = LPFC_HBA_ERROR;
1652 return;
1653 }

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (phba->link_state == LPFC_HBA_ERROR &&
	    test_bit(HBA_PCI_ERR, &phba->bit_flags)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0479 Deferred Adapter Hardware Error "
			"Data: x%x x%x x%x\n",
			phba->work_hs, phba->work_status[0],
			phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggers erratt. That can cause I/Os to be
	 * dropped by the firmware. Error out the iocbs (I/Os) on the txcmplq
	 * and let the SCSI layer retry them after re-establishing the link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the
	 * first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

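/**
 * lpfc_board_errevt_to_mgmt - Post a board error event to the FC transport
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts an FC_REG_BOARD_EVENT vendor-unique event with the
 * LPFC_EVENT_PORTINTERR subcategory so that management applications
 * listening on the fc_host vendor event interface are notified of the
 * board error.
 **/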
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggers erratt with HS_FFER6.
		 * That can cause I/Os to be dropped by the firmware.
		 * Error out the iocbs (I/Os) on the txcmplq and let the
		 * SCSI layer retry them after re-establishing the link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(event_data), (char *) &event_data,
					  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: send reset/port recovery message.
 *
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. It waits for port
 * status register (ERR, RDY, RN) bits before proceeding with function reset.
 * During this process, interrupt vectors are freed and later requested
 * for handling possible port resource change.
 **/
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;
	LPFC_MBOXQ_t *mboxq;

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		/*
		 * On error status condition, the driver needs to wait for
		 * port readiness before performing the reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (rc)
			return rc;
	}

	/* need reset: attempt for port recovery */
	if (en_rn_msg)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2887 Reset Needed: Attempting Port "
				"Recovery...\n");

	/* If called in no-wait mode, the HBA has been reset and is not
	 * functional, thus we should clear the
	 * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags.
	 */
	if (mbx_action == LPFC_MBX_NO_WAIT) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
		if (phba->sli.mbox_active) {
			mboxq = phba->sli.mbox_active;
			mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
			__lpfc_mbox_cmpl_put(phba, mboxq);
			phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			phba->sli.mbox_active = NULL;
		}
		spin_unlock_irq(&phba->hbalock);
	}

	lpfc_offline_prep(phba, mbx_action);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	/* release interrupt for possible resource change */
	lpfc_sli4_disable_intr(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6309 Failed to restart board\n");
		return rc;
	}
	/* request and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3175 Failed to enable interrupt\n");
		return -EIO;
	}
	phba->intr_mode = intr_mode;
	rc = lpfc_online(phba);
	if (rc == 0)
		lpfc_unblock_mgmt_io(phba);

	return rc;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
	bool en_rn_msg = true;
	struct temp_event temp_event_data;
	struct lpfc_register portsmphr_reg;
	int rc, i;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3166 pci channel is offline\n");
		lpfc_sli_flush_io_rings(phba);
		return;
	}

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"7623 Checking UE recoverable");

		for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
			if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				       &portsmphr_reg.word0))
				continue;

			smphr_port_status = bf_get(lpfc_port_smphr_port_status,
						   &portsmphr_reg);
			if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
			    LPFC_PORT_SEM_UE_RECOVERABLE)
				break;
			/* Sleep for 1 second before checking the semaphore */
			msleep(1000);
		}

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"4827 smphr_port_status x%x : Waited %dSec",
				smphr_port_status, i);

		/* Recoverable UE, reset the HBA device */
		if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
		    LPFC_PORT_SEM_UE_RECOVERABLE) {
			for (i = 0; i < 20; i++) {
				msleep(1000);
				if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
						&portsmphr_reg.word0) &&
				    (LPFC_POST_STAGE_PORT_READY ==
				     bf_get(lpfc_port_smphr_port_status,
					    &portsmphr_reg))) {
					rc = lpfc_sli4_port_sta_fn_reset(phba,
						LPFC_MBX_NO_WAIT, en_rn_msg);
					if (rc == 0)
						return;
					lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"4215 Failed to recover UE");
					break;
				}
			}
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"7624 Firmware not ready: Failing UE recovery,"
				" waited %dSec", i);
		phba->link_state = LPFC_HBA_ERROR;
		break;

	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_6:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2889 Port Overtemperature event, "
					"taking port offline Data: x%x x%x\n",
					reg_err1, reg_err2);

			phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
			temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
			temp_event_data.event_code = LPFC_CRIT_TEMP;
			temp_event_data.data = 0xFFFFFFFF;

			shost = lpfc_shost_from_vport(phba->pport);
			fc_host_post_vendor_event(shost, fc_get_event_number(),
						  sizeof(temp_event_data),
						  (char *)&temp_event_data,
						  SCSI_NL_VID_TYPE_PCI
						  | PCI_VENDOR_ID_EMULEX);

			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3143 Port Down: Firmware Update "
					"Detected\n");
			en_rn_msg = false;
		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			   reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3145 Port Down: Provisioning\n");

		/* If resets are disabled then leave the HBA alone and return */
		if (!phba->cfg_enable_hba_reset)
			return;

		/* Check port status register for function reset */
		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
						 en_rn_msg);
		if (rc == 0) {
			/* don't report event on forced debug dump */
			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
				return;
			else
				break;
		}
		/* fall through for not able to recover */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3152 Unrecoverable error\n");
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event. SLI3 only.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	int rc = 0;

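	/* A nonzero rc below identifies which step failed; the value is
	 * reported in the 0300 error log at the end of this routine.
	 */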
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	rc = lpfc_mbox_rsrc_prep(phba, pmb);
	if (rc) {
		rc = 2;
		mempool_free(pmb, phba->mbox_mem_pool);
		goto lpfc_handle_latt_err_exit;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);
	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

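/**
 * lpfc_fill_vpd - Fill in hba fields from VPD read-only keyword data
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @length: length of the VPD-R resource data in bytes.
 * @pindex: pointer to the current parse index into @vpd.
 *
 * This routine walks the keyword fields of a VPD-R resource and copies the
 * SN (serial number) and V1-V4 vendor-specific fields into the SerialNumber,
 * ModelDesc, ModelName, ProgramType and Port fields of the phba structure;
 * unrecognized keywords are skipped.
 **/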
static void
lpfc_fill_vpd(struct lpfc_hba *phba, uint8_t *vpd, int length, int *pindex)
{
	int i, j;

	while (length > 0) {
		/* Look for Serial Number */
		if ((vpd[*pindex] == 'S') && (vpd[*pindex + 1] == 'N')) {
			*pindex += 2;
			i = vpd[*pindex];
			*pindex += 1;
			j = 0;
			length -= (3 + i);
			while (i--) {
				phba->SerialNumber[j++] = vpd[(*pindex)++];
				if (j == 31)
					break;
			}
			phba->SerialNumber[j] = 0;
			continue;
		} else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '1')) {
			phba->vpd_flag |= VPD_MODEL_DESC;
			*pindex += 2;
			i = vpd[*pindex];
			*pindex += 1;
			j = 0;
			length -= (3 + i);
			while (i--) {
				phba->ModelDesc[j++] = vpd[(*pindex)++];
				if (j == 255)
					break;
			}
			phba->ModelDesc[j] = 0;
			continue;
		} else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '2')) {
			phba->vpd_flag |= VPD_MODEL_NAME;
			*pindex += 2;
			i = vpd[*pindex];
			*pindex += 1;
			j = 0;
			length -= (3 + i);
			while (i--) {
				phba->ModelName[j++] = vpd[(*pindex)++];
				if (j == 79)
					break;
			}
			phba->ModelName[j] = 0;
			continue;
		} else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '3')) {
			phba->vpd_flag |= VPD_PROGRAM_TYPE;
			*pindex += 2;
			i = vpd[*pindex];
			*pindex += 1;
			j = 0;
			length -= (3 + i);
			while (i--) {
				phba->ProgramType[j++] = vpd[(*pindex)++];
				if (j == 255)
					break;
			}
			phba->ProgramType[j] = 0;
			continue;
		} else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '4')) {
			phba->vpd_flag |= VPD_PORT;
			*pindex += 2;
			i = vpd[*pindex];
			*pindex += 1;
			j = 0;
			length -= (3 + i);
			while (i--) {
				if ((phba->sli_rev == LPFC_SLI_REV4) &&
				    (phba->sli4_hba.pport_name_sta ==
				     LPFC_SLI4_PPNAME_GET)) {
					j++;
					(*pindex)++;
				} else
					phba->Port[j++] = vpd[(*pindex)++];
				if (j == 19)
					break;
			}
			if ((phba->sli_rev != LPFC_SLI_REV4) ||
			    (phba->sli4_hba.pport_name_sta ==
			     LPFC_SLI4_PPNAME_NON))
				phba->Port[j] = 0;
			continue;
		} else {
			*pindex += 2;
			i = vpd[*pindex];
			*pindex += 1;
			*pindex += i;
			length -= (3 + i);
		}
	}
}

/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
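		/*
		 * PCI VPD resource tags: 0x82 is the Identifier String and
		 * 0x91 is VPD-W (both skipped over), 0x90 is the VPD-R
		 * resource whose keyword fields are parsed, and 0x78 is the
		 * End Tag.
		 */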
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;

			lpfc_fill_vpd(phba, vpd, Length, &index);
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index++;
			break;
		}
	}

	return 1;
}

/**
 * lpfc_get_atto_model_desc - Retrieve ATTO HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI device
 * ID. The @descp passed into this function points to an array of 256 chars. It
 * shall be returned with the model name, maximum speed, and the host bus type.
 * The @mdp passed into this function points to an array of 80 chars. When the
 * function returns, the @mdp will be filled with the model name.
 **/
static void
lpfc_get_atto_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	uint16_t sub_dev_id = phba->pcidev->subsystem_device;
	char *model = "<Unknown>";
	int tbolt = 0;

	switch (sub_dev_id) {
	case PCI_DEVICE_ID_CLRY_161E:
		model = "161E";
		break;
	case PCI_DEVICE_ID_CLRY_162E:
		model = "162E";
		break;
	case PCI_DEVICE_ID_CLRY_164E:
		model = "164E";
		break;
	case PCI_DEVICE_ID_CLRY_161P:
		model = "161P";
		break;
	case PCI_DEVICE_ID_CLRY_162P:
		model = "162P";
		break;
	case PCI_DEVICE_ID_CLRY_164P:
		model = "164P";
		break;
	case PCI_DEVICE_ID_CLRY_321E:
		model = "321E";
		break;
	case PCI_DEVICE_ID_CLRY_322E:
		model = "322E";
		break;
	case PCI_DEVICE_ID_CLRY_324E:
		model = "324E";
		break;
	case PCI_DEVICE_ID_CLRY_321P:
		model = "321P";
		break;
	case PCI_DEVICE_ID_CLRY_322P:
		model = "322P";
		break;
	case PCI_DEVICE_ID_CLRY_324P:
		model = "324P";
		break;
	case PCI_DEVICE_ID_TLFC_2XX2:
		model = "2XX2";
		tbolt = 1;
		break;
	case PCI_DEVICE_ID_TLFC_3162:
		model = "3162";
		tbolt = 1;
		break;
	case PCI_DEVICE_ID_TLFC_3322:
		model = "3322";
		tbolt = 1;
		break;
	default:
		model = "Unknown";
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79, "%s", model);

	if (descp && descp[0] == '\0')
		snprintf(descp, 255,
			 "ATTO %s%s, Fibre Channel Adapter Initiator, Port %s",
			 (tbolt) ? "ThunderLink FC " : "Celerity FC-",
			 model,
			 phba->Port);
}

/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI device
 * ID. The @descp passed into this function points to an array of 256 chars. It
 * shall be returned with the model name, maximum speed, and the host bus type.
 * The @mdp passed into this function points to an array of 80 chars. When the
 * function returns, the @mdp will be filled with the model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	if (mdp && mdp[0] != '\0' &&
	    descp && descp[0] != '\0')
		return;

	if (phba->pcidev->vendor == PCI_VENDOR_ID_ATTO) {
		lpfc_get_atto_model_desc(phba, mdp, descp);
		return;
	}

	if (phba->lmt & LMT_64Gb)
		max_speed = 64;
	else if (phba->lmt & LMT_32Gb)
		max_speed = 32;
	else if (phba->lmt & LMT_16Gb)
		max_speed = 16;
	else if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else if (phba->lmt & LMT_1Gb)
		max_speed = 1;
	else
		max_speed = 0;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI", ""};
		else
			m = (typeof(m)){"LP7000E", "PCI", ""};
		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI", ""};
		else
			m = (typeof(m)){"LP9000", "PCI", ""};
		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_TOMCAT:
		oneConnect = 1;
		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_FALCON:
		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
				"EmulexSecure Fibre"};
		break;
	case PCI_DEVICE_ID_BALIUS:
		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC:
		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC_VF:
		m = (typeof(m)){"LPe16000", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe",
				"Obsolete, Unsupported FCoE"};
		break;
	case PCI_DEVICE_ID_LANCER_G6_FC:
		m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_G7_FC:
		m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_G7P_FC:
		m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SKYHAWK:
	case PCI_DEVICE_ID_SKYHAWK_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
		break;
	default:
		m = (typeof(m)){"Unknown", "", ""};
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79, "%s", m.name);
	/*
	 * oneConnect hbas require special processing; they are all
	 * initiators and the port number is appended to the end of
	 * the description.
	 */
	if (descp && descp[0] == '\0') {
		if (oneConnect)
			snprintf(descp, 255,
				 "Emulex OneConnect %s, %s Initiator %s",
				 m.name, m.function,
				 phba->Port);
		else if (max_speed == 0)
			snprintf(descp, 255,
				 "Emulex %s %s %s",
				 m.name, m.bus, m.function);
		else
			snprintf(descp, 255,
				 "Emulex %s %d%s %s %s",
				 m.name, max_speed, (GE) ? "GE" : "Gb",
				 m.bus, m.function);
	}
}

/**
 * lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to an IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring.
 *
 * Return codes
 *   The number of IOCBs NOT able to be posted to the IOCB ring.
 **/
int
lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		      int cnt)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

	cnt += pring->missbufcnt;

	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for command iocb */
		iocb = lpfc_sli_get_iocbq(phba);
		if (iocb == NULL) {
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
		if (!mp1 || !mp1->virt) {
			kfree(mp1);
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		/* Allocate buffer to post */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (!mp2 || !mp2->virt) {
				kfree(mp2);
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				lpfc_sli_release_iocbq(phba, iocb);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else {
			mp2 = NULL;
		}

		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
		    IOCB_ERROR) {
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2)
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
	}
	pring->missbufcnt = 0;
	return 0;
}

/**
 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts initial receive IOCB buffers to the ELS ring. The
 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
 * set to 64 IOCBs. SLI3 only.
 *
 * Return codes
 *   0 - success (currently always success)
 **/
static int
lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Ring 0, ELS / CT buffers */
	lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING],
			      LPFC_BUF_RING0);
	/* Ring 2 - FCP no buffers needed */

	return 0;
}

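/* Rotate the 32-bit value V left by N bits - the circular shift used by
 * the SHA-1 rounds in lpfc_sha_iterate() below.
 */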
#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))

/**
 * lpfc_sha_init - Set up initial array of hash table entries
 * @HashResultPointer: pointer to an array as hash table.
 *
 * This routine sets up the initial values to the array of hash table entries
 * for the LC HBAs.
 **/
static void
lpfc_sha_init(uint32_t *HashResultPointer)
{
	HashResultPointer[0] = 0x67452301;
	HashResultPointer[1] = 0xEFCDAB89;
	HashResultPointer[2] = 0x98BADCFE;
	HashResultPointer[3] = 0x10325476;
	HashResultPointer[4] = 0xC3D2E1F0;
}

/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to a working hash table.
 *
 * This routine iterates an initial hash table pointed to by
 * @HashResultPointer with the values from the working hash table pointed to
 * by @HashWorkingPointer. The results are put back into the initial hash
 * table, returned through the @HashResultPointer as the result hash table.
 **/
static void
lpfc_sha_iterate(uint32_t *HashResultPointer, uint32_t *HashWorkingPointer)
{
	int t;
	uint32_t TEMP;
	uint32_t A, B, C, D, E;
	t = 16;
	do {
		HashWorkingPointer[t] =
		    S(1,
		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
	} while (++t <= 79);
	t = 0;
	A = HashResultPointer[0];
	B = HashResultPointer[1];
	C = HashResultPointer[2];
	D = HashResultPointer[3];
	E = HashResultPointer[4];

	do {
		if (t < 20) {
			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
		} else if (t < 40) {
			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
		} else if (t < 60) {
			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
		} else {
			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
		}
		TEMP += S(5, A) + E + HashWorkingPointer[t];
		E = D;
		D = C;
		C = S(30, B);
		B = A;
		A = TEMP;
	} while (++t <= 79);

	HashResultPointer[0] += A;
	HashResultPointer[1] += B;
	HashResultPointer[2] += C;
	HashResultPointer[3] += D;
	HashResultPointer[4] += E;
}

/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * This routine calculates the working hash array referred by @HashWorking
 * from the challenge random numbers associated with the host, referred by
 * @RandomChallenge. The result is put into the entry of the working hash
 * array and returned by reference through @HashWorking.
 **/
static void
lpfc_challenge_key(uint32_t *RandomChallenge, uint32_t *HashWorking)
{
	*HashWorking = (*RandomChallenge ^ *HashWorking);
}

/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to an array of unsigned 32-bit integers.
 *
 * This routine performs the special handling for LC HBA initialization.
 **/
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
	int t;
	uint32_t *HashWorking;
	uint32_t *pwwnn = (uint32_t *) phba->wwnn;

	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
	if (!HashWorking)
		return;

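	/* Seed the first and last two entries of the working hash array
	 * with the adapter's WWNN.
	 */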
	HashWorking[0] = HashWorking[78] = *pwwnn++;
	HashWorking[1] = HashWorking[79] = *pwwnn;

	for (t = 0; t < 7; t++)
		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);

	lpfc_sha_init(hbainit);
	lpfc_sha_iterate(hbainit, HashWorking);
	kfree(HashWorking);
}

/**
 * lpfc_cleanup - Performs vport cleanups before deleting a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine performs the necessary cleanups before deleting the @vport.
 * It invokes the discovery state machine to perform necessary state
 * transitions and to release the ndlps associated with the @vport. Note,
 * the physical port is treated as @vport 0.
 **/
void
lpfc_cleanup(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int i = 0;

	if (phba->link_state > LPFC_LINK_DOWN)
		lpfc_port_link_failure(vport);

	/* Clean up VMID resources */
	if (lpfc_is_vmid_enabled(phba))
		lpfc_vmid_vport_cleanup(vport);

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (vport->port_type != LPFC_PHYSICAL_PORT &&
		    ndlp->nlp_DID == Fabric_DID) {
			/* Just free up ndlp with Fabric_DID for vports */
			lpfc_nlp_put(ndlp);
			continue;
		}

		if (ndlp->nlp_DID == Fabric_Cntl_DID &&
		    ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
			lpfc_nlp_put(ndlp);
			continue;
		}

		/* Fabric Ports not in UNMAPPED state are cleaned up in the
		 * DEVICE_RM event.
		 */
		if (ndlp->nlp_type & NLP_FABRIC &&
		    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
			lpfc_disc_state_machine(vport, ndlp, NULL,
						NLP_EVT_DEVICE_RECOVERY);

		if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
			lpfc_disc_state_machine(vport, ndlp, NULL,
						NLP_EVT_DEVICE_RM);
	}

	/* This is a special case flush to return all
	 * IOs before entering this loop. There are
	 * two points in the code where a flush is
	 * avoided if the FC_UNLOADING flag is set.
	 * One is in the multi-pool destroy (this
	 * prevents a crash) and the other is in the
	 * NVMe abort handler (also prevents a crash).
	 * Both of these exceptions are cases where
	 * the slot is still accessible. The flush
	 * here is only when the pci slot is offline.
	 */
	if (vport->load_flag & FC_UNLOADING &&
	    pci_channel_offline(phba->pcidev))
		lpfc_sli_flush_io_rings(vport->phba);

	/* At this point, ALL ndlp's should be gone
	 * because of the previous NLP_EVT_DEVICE_RM.
	 * Let's wait for this to happen, if needed.
	 */
	while (!list_empty(&vport->fc_nodes)) {
		if (i++ > 3000) {
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_TRACE_EVENT,
					 "0233 Nodelist not empty\n");
			list_for_each_entry_safe(ndlp, next_ndlp,
						 &vport->fc_nodes, nlp_listp) {
				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
						 LOG_DISCOVERY,
						 "0282 did:x%x ndlp:x%px "
						 "refcnt:%d xflags x%x nflag x%x\n",
						 ndlp->nlp_DID, (void *)ndlp,
						 kref_read(&ndlp->kref),
						 ndlp->fc4_xpt_flags,
						 ndlp->nlp_flag);
			}
			break;
		}

		/* Wait for any activity on ndlps to settle */
		msleep(10);
	}
	lpfc_cleanup_vports_rrqs(vport, NULL);
}

/**
 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine stops all the timers associated with a @vport. This function
 * is invoked before disabling or deleting a @vport. Note that the physical
 * port is treated as @vport 0.
 **/
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
	del_timer_sync(&vport->els_tmofunc);
	del_timer_sync(&vport->delayed_disc_tmo);
	lpfc_can_disctmo(vport);
	return;
}

/**
 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
 * caller of this routine should already hold the host lock.
 **/
void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	/* Clear pending FCF rediscovery wait flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

	/* Now, try to stop the timer */
	del_timer(&phba->fcf.redisc_wait);
}

/**
 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
 * checks whether the FCF rediscovery wait timer is pending with the host
 * lock held before proceeding with disabling the timer and clearing the
 * wait timer pending flag.
 **/
void
lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		/* FCF rediscovery timer already fired or stopped */
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
	/* Clear failover in progress flags */
	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_cmf_stop - Stop CMF processing
 * @phba: pointer to lpfc hba data structure.
 *
 * This is called when the link goes down or if CMF mode is turned OFF.
 * It is also called when going offline or unloaded just before the
 * congestion info buffer is unregistered.
 **/
void
lpfc_cmf_stop(struct lpfc_hba *phba)
{
	int cpu;
	struct lpfc_cgn_stat *cgs;

	/* We only do something if CMF is enabled */
	if (!phba->sli4_hba.pc_sli4_params.cmf)
		return;

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"6221 Stop CMF / Cancel Timer\n");

	/* Cancel the CMF timer */
	hrtimer_cancel(&phba->cmf_stats_timer);
	hrtimer_cancel(&phba->cmf_timer);

	/* Zero CMF counters */
	atomic_set(&phba->cmf_busy, 0);
	for_each_present_cpu(cpu) {
		cgs = per_cpu_ptr(phba->cmf_stat, cpu);
		atomic64_set(&cgs->total_bytes, 0);
		atomic64_set(&cgs->rcv_bytes, 0);
		atomic_set(&cgs->rx_io_cnt, 0);
		atomic64_set(&cgs->rx_latency, 0);
	}
	atomic_set(&phba->cmf_bw_wait, 0);

	/* Resume any blocked IO - Queue unblock on workqueue */
	queue_work(phba->wq, &phba->unblock_request_work);
}

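/*
 * Derive the maximum line rate in bytes per second from the current port
 * speed: the value from lpfc_sli_port_speed_get() is scaled by 1024 * 1024
 * and then divided by 10 bits per byte, which allows for serial encoding
 * overhead on the wire.
 */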
static inline uint64_t
lpfc_get_max_line_rate(struct lpfc_hba *phba)
{
	uint64_t rate = lpfc_sli_port_speed_get(phba);

	return ((((unsigned long)rate) * 1024 * 1024) / 10);
}

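/**
 * lpfc_cmf_signal_init - Recompute CMF limits and sync bandwidth with firmware
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine recalculates the CMF interval byte-count budget from the
 * current link speed and issues a CMF_SYNC_WQE so that the firmware can
 * sync up its congestion management bandwidth with the link speed.
 **/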
void
lpfc_cmf_signal_init(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"6223 Signal CMF init\n");

	/* Use the new fc_linkspeed to recalculate */
	phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
	phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba);
	phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
					    phba->cmf_interval_rate, 1000);
	phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count;

	/* This is a signal to firmware to sync up CMF BW with link speed */
	lpfc_issue_cmf_sync_wqe(phba, 0, 0);
}

/**
 * lpfc_cmf_start - Start CMF processing
 * @phba: pointer to lpfc hba data structure.
 *
 * This is called when the link comes up or when CMF mode is switched
 * from OFF to Monitor or Managed.
 **/
void
lpfc_cmf_start(struct lpfc_hba *phba)
{
	struct lpfc_cgn_stat *cgs;
	int cpu;

	/* We only do something if CMF is enabled */
	if (!phba->sli4_hba.pc_sli4_params.cmf ||
	    phba->cmf_active_mode == LPFC_CFG_OFF)
		return;

	/* Reinitialize congestion buffer info */
	lpfc_init_congestion_buf(phba);

	atomic_set(&phba->cgn_fabric_warn_cnt, 0);
	atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
	atomic_set(&phba->cgn_sync_alarm_cnt, 0);
	atomic_set(&phba->cgn_sync_warn_cnt, 0);

	atomic_set(&phba->cmf_busy, 0);
	for_each_present_cpu(cpu) {
		cgs = per_cpu_ptr(phba->cmf_stat, cpu);
		atomic64_set(&cgs->total_bytes, 0);
		atomic64_set(&cgs->rcv_bytes, 0);
		atomic_set(&cgs->rx_io_cnt, 0);
		atomic64_set(&cgs->rx_latency, 0);
	}
	phba->cmf_latency.tv_sec = 0;
	phba->cmf_latency.tv_nsec = 0;

	lpfc_cmf_signal_init(phba);

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"6222 Start CMF / Timer\n");

	phba->cmf_timer_cnt = 0;
	hrtimer_start(&phba->cmf_timer,
		      ktime_set(0, LPFC_CMF_INTERVAL * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
	hrtimer_start(&phba->cmf_stats_timer,
		      ktime_set(0, LPFC_SEC_MIN * NSEC_PER_SEC),
		      HRTIMER_MODE_REL);
	/* Setup for latency check in IO cmpl routines */
	ktime_get_real_ts64(&phba->cmf_latency);

	atomic_set(&phba->cmf_bw_wait, 0);
	atomic_set(&phba->cmf_stop_io, 0);
}

/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops all the timers associated with a HBA. This function is
 * invoked before either putting a HBA offline or unloading the driver.
 **/
void
lpfc_stop_hba_timers(struct lpfc_hba *phba)
{
	if (phba->pport)
		lpfc_stop_vport_timers(phba->pport);
	cancel_delayed_work_sync(&phba->eq_delay_work);
	cancel_delayed_work_sync(&phba->idle_stat_delay_work);
	del_timer_sync(&phba->sli.mbox_tmo);
	del_timer_sync(&phba->fabric_block_timer);
	del_timer_sync(&phba->eratt_poll);
	del_timer_sync(&phba->hb_tmofunc);
	if (phba->sli_rev == LPFC_SLI_REV4) {
		del_timer_sync(&phba->rrq_tmr);
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	}
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		/* Stop any LightPulse device specific driver timers */
		del_timer_sync(&phba->fcp_poll_timer);
		break;
	case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0297 Invalid device group (x%x)\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

/**
 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox no wait action.
 *
 * This routine marks a HBA's management interface as blocked. Once the HBA's
 * management interface is marked as blocked, all the user space access to
 * the HBA, whether from the sysfs interface or the libdfc interface, will
 * be blocked. The HBA is set to block the management interface when the
 * driver prepares the HBA interface for online or offline.
 **/
static void
lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
{
	unsigned long iflag;
	uint8_t actcmd = MBX_HEARTBEAT;
	unsigned long timeout;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (mbx_action == LPFC_MBX_NO_WAIT)
		return;
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (phba->sli.mbox_active) {
		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
				phba->sli.mbox_active) * 1000) + jiffies;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2813 Mgmt IO is Blocked %x "
					"- mbox cmd %x still active\n",
					phba->sli.sli_flag, actcmd);
			break;
		}
	}
}

/**
 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocate RPIs for all active remote nodes. This is needed whenever
 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
 * is to fixup the temporary rpi assignments.
 **/
void
lpfc_sli4_node_prep(struct lpfc_hba *phba)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_vport **vports;
	int i, rpi;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports == NULL)
		return;

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		if (vports[i]->load_flag & FC_UNLOADING)
			continue;

		list_for_each_entry_safe(ndlp, next_ndlp,
					 &vports[i]->fc_nodes,
					 nlp_listp) {
			rpi = lpfc_sli4_alloc_rpi(phba);
			if (rpi == LPFC_RPI_ALLOC_ERROR) {
				/* TODO print log? */
				continue;
			}
			ndlp->nlp_rpi = rpi;
			lpfc_printf_vlog(ndlp->vport, KERN_INFO,
					 LOG_NODE | LOG_DISCOVERY,
					 "0009 Assign RPI x%x to ndlp x%px "
					 "DID:x%06x flg:x%x\n",
					 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag);
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
3436
/**
 * lpfc_create_expedite_pool - create expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
 * to the expedite pool. Mark them as expedite.
 **/
static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	struct lpfc_epd_pool *epd_pool;
	unsigned long iflag;

	epd_pool = &phba->epd_pool;
	qp = &phba->sli4_hba.hdwq[0];

	spin_lock_init(&epd_pool->lock);
	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
	spin_lock(&epd_pool->lock);
	INIT_LIST_HEAD(&epd_pool->list);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &qp->lpfc_io_buf_list_put, list) {
		list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
		lpfc_ncmd->expedite = true;
		qp->put_io_bufs--;
		epd_pool->count++;
		if (epd_pool->count >= XRI_BATCH)
			break;
	}
	spin_unlock(&epd_pool->lock);
	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
}

/**
 * lpfc_destroy_expedite_pool - destroy expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
 * of HWQ 0. Clear the expedite mark.
 **/
static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	struct lpfc_epd_pool *epd_pool;
	unsigned long iflag;

	epd_pool = &phba->epd_pool;
	qp = &phba->sli4_hba.hdwq[0];

	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
	spin_lock(&epd_pool->lock);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &epd_pool->list, list) {
		list_move_tail(&lpfc_ncmd->list,
			       &qp->lpfc_io_buf_list_put);
		lpfc_ncmd->expedite = false;	/* clear the expedite mark */
		qp->put_io_bufs++;
		epd_pool->count--;
	}
	spin_unlock(&epd_pool->lock);
	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
}

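/*
 * Illustrative note, not part of the driver: the expedite pool holds at
 * most XRI_BATCH buffers carved out of HWQ 0 and is created/destroyed in
 * lockstep with the multi-XRI pools whenever NVMe is enabled:
 *
 *	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
 *		lpfc_create_expedite_pool(phba);
 *	...
 *	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
 *		lpfc_destroy_expedite_pool(phba);
 *
 * Buffers carrying the ->expedite mark are assumed to be reserved for
 * requests allocated with the expedite flag, so such requests do not
 * starve behind ordinary I/O when the regular pools run dry.
 */
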
/**
 * lpfc_create_multixri_pools - create multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the public and private pools per HWQ, then
 * moves XRIs from lpfc_io_buf_list_put to the public pool. The high and
 * low watermarks are also initialized.
 **/
void lpfc_create_multixri_pools(struct lpfc_hba *phba)
{
	u32 i, j;
	u32 hwq_count;
	u32 count_per_hwq;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
			phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
			phba->sli4_hba.io_xri_cnt);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_create_expedite_pool(phba);

	hwq_count = phba->cfg_hdw_queue;
	count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;

	for (i = 0; i < hwq_count; i++) {
		multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);

		if (!multixri_pool) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"1238 Failed to allocate memory for "
					"multixri_pool\n");

			if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
				lpfc_destroy_expedite_pool(phba);

			j = 0;
			while (j < i) {
				qp = &phba->sli4_hba.hdwq[j];
				kfree(qp->p_multixri_pool);
				j++;
			}
			phba->cfg_xri_rebalancing = 0;
			return;
		}

		qp = &phba->sli4_hba.hdwq[i];
		qp->p_multixri_pool = multixri_pool;

		multixri_pool->xri_limit = count_per_hwq;
		multixri_pool->rrb_next_hwqid = i;

		/* Deal with public free xri pool */
		pbl_pool = &multixri_pool->pbl_pool;
		spin_lock_init(&pbl_pool->lock);
		spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
		spin_lock(&pbl_pool->lock);
		INIT_LIST_HEAD(&pbl_pool->list);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &qp->lpfc_io_buf_list_put, list) {
			list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
			qp->put_io_bufs--;
			pbl_pool->count++;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
				pbl_pool->count, i);
		spin_unlock(&pbl_pool->lock);
		spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);

		/* Deal with private free xri pool */
		pvt_pool = &multixri_pool->pvt_pool;
		pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
		pvt_pool->low_watermark = XRI_BATCH;
		spin_lock_init(&pvt_pool->lock);
		spin_lock_irqsave(&pvt_pool->lock, iflag);
		INIT_LIST_HEAD(&pvt_pool->list);
		pvt_pool->count = 0;
		spin_unlock_irqrestore(&pvt_pool->lock, iflag);
	}
}

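/*
 * Illustrative arithmetic, not part of the driver: with io_xri_cnt XRIs
 * spread over cfg_hdw_queue hardware queues, each multi-XRI pool gets
 *
 *	xri_limit      = io_xri_cnt / hwq_count;
 *	high_watermark = xri_limit / 2;   -- cap on the private pool
 *	low_watermark  = XRI_BATCH;       -- refill trigger
 *
 * For example, 2048 XRIs over 16 HWQs yields xri_limit = 128, so a
 * private pool grows to at most 64 buffers before the surplus is pushed
 * back to that HWQ's public pbl_pool.
 */
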
/**
 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine returns XRIs from the public/private pools to
 * lpfc_io_buf_list_put.
 **/
static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
{
	u32 i;
	u32 hwq_count;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_destroy_expedite_pool(phba);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_sli_flush_io_rings(phba);

	hwq_count = phba->cfg_hdw_queue;

	for (i = 0; i < hwq_count; i++) {
		qp = &phba->sli4_hba.hdwq[i];
		multixri_pool = qp->p_multixri_pool;
		if (!multixri_pool)
			continue;

		qp->p_multixri_pool = NULL;

		spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);

		/* Deal with public free xri pool */
		pbl_pool = &multixri_pool->pbl_pool;
		spin_lock(&pbl_pool->lock);

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
				pbl_pool->count, i);

		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &pbl_pool->list, list) {
			list_move_tail(&lpfc_ncmd->list,
				       &qp->lpfc_io_buf_list_put);
			qp->put_io_bufs++;
			pbl_pool->count--;
		}

		INIT_LIST_HEAD(&pbl_pool->list);
		pbl_pool->count = 0;

		spin_unlock(&pbl_pool->lock);

		/* Deal with private free xri pool */
		pvt_pool = &multixri_pool->pvt_pool;
		spin_lock(&pvt_pool->lock);

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
				pvt_pool->count, i);

		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &pvt_pool->list, list) {
			list_move_tail(&lpfc_ncmd->list,
				       &qp->lpfc_io_buf_list_put);
			qp->put_io_bufs++;
			pvt_pool->count--;
		}

		INIT_LIST_HEAD(&pvt_pool->list);
		pvt_pool->count = 0;

		spin_unlock(&pvt_pool->lock);
		spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);

		kfree(multixri_pool);
	}
}

/**
 * lpfc_online - Initialize and bring an HBA online
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the HBA and brings it online. During this
 * process, the management interface is blocked to prevent user space
 * access to the HBA from interfering with the driver initialization.
 *
 * Return codes
 *   0 - successful
 *   1 - failed
 **/
int
lpfc_online(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i, error = 0;
	bool vpis_cleared = false;

	if (!phba)
		return 0;
	vport = phba->pport;

	if (!(vport->fc_flag & FC_OFFLINE_MODE))
		return 0;

	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0458 Bring Adapter online\n");

	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
		spin_lock_irq(&phba->hbalock);
		if (!phba->sli4_hba.max_cfg_param.vpi_used)
			vpis_cleared = true;
		spin_unlock_irq(&phba->hbalock);

		/* Reestablish the local initiator port.
		 * The offline process destroyed the previous lport.
		 */
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
		    !phba->nvmet_support) {
			error = lpfc_nvme_create_localport(phba->pport);
			if (error)
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6132 NVME restore reg failed "
					"on nvmei error x%x\n", error);
		}
	} else {
		lpfc_sli_queue_init(phba);
		if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			struct Scsi_Host *shost;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			if (phba->sli_rev == LPFC_SLI_REV4) {
				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
				if ((vpis_cleared) &&
				    (vports[i]->port_type !=
					LPFC_PHYSICAL_PORT))
					vports[i]->vpi = 0;
			}
			spin_unlock_irq(shost->host_lock);
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	if (phba->cfg_xri_rebalancing)
		lpfc_create_multixri_pools(phba);

	lpfc_cpuhp_add(phba);

	lpfc_unblock_mgmt_io(phba);
	return 0;
}

/**
 * lpfc_unblock_mgmt_io - Mark an HBA's management interface as not blocked
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine marks an HBA's management interface as not blocked. Once the
 * management interface is unblocked, all user space access to the HBA,
 * whether through the sysfs interface or the libdfc interface, is allowed.
 * The management interface is blocked while the driver prepares the HBA to
 * go online or offline, and unblocked again afterwards.
 **/
void
lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
{
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_offline_prep - Prepare an HBA to be brought offline
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 *
 * This routine is invoked to prepare an HBA to be brought offline. It
 * unregisters the login for all nodes on all vports and flushes the
 * mailbox queue to make the HBA ready to be brought offline.
 **/
void
lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	int i;
	int offline;
	bool hba_pci_err;

	if (vport->fc_flag & FC_OFFLINE_MODE)
		return;

	lpfc_block_mgmt_io(phba, mbx_action);

	lpfc_linkdown(phba);

	offline = pci_channel_offline(phba->pcidev);
	hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);

	/* Issue an unreg_login to all nodes on all vports */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->load_flag & FC_UNLOADING)
				continue;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
			spin_unlock_irq(shost->host_lock);

			list_for_each_entry_safe(ndlp, next_ndlp,
						 &vports[i]->fc_nodes,
						 nlp_listp) {

				spin_lock_irq(&ndlp->lock);
				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
				spin_unlock_irq(&ndlp->lock);

				if (offline || hba_pci_err) {
					spin_lock_irq(&ndlp->lock);
					ndlp->nlp_flag &= ~(NLP_UNREG_INP |
							    NLP_RPI_REGISTERED);
					spin_unlock_irq(&ndlp->lock);
					if (phba->sli_rev == LPFC_SLI_REV4)
						lpfc_sli_rpi_release(vports[i],
								     ndlp);
				} else {
					lpfc_unreg_rpi(vports[i], ndlp);
				}
				/*
				 * Whenever an SLI4 port goes offline, free the
				 * RPI. Get a new RPI when the adapter port
				 * comes back online.
				 */
				if (phba->sli_rev == LPFC_SLI_REV4) {
					lpfc_printf_vlog(vports[i], KERN_INFO,
						 LOG_NODE | LOG_DISCOVERY,
						 "0011 Free RPI x%x on "
						 "ndlp: x%px did x%x\n",
						 ndlp->nlp_rpi, ndlp,
						 ndlp->nlp_DID);
					lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
					ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
				}

				if (ndlp->nlp_type & NLP_FABRIC) {
					lpfc_disc_state_machine(vports[i], ndlp,
						NULL, NLP_EVT_DEVICE_RECOVERY);

					/* Don't remove the node unless the node
					 * has been unregistered with the
					 * transport, and we're not in recovery
					 * before dev_loss_tmo triggered.
					 * Otherwise, let dev_loss take care of
					 * the node.
					 */
					if (!(ndlp->save_flags &
					      NLP_IN_RECOV_POST_DEV_LOSS) &&
					    !(ndlp->fc4_xpt_flags &
					      (NVME_XPT_REGD | SCSI_XPT_REGD)))
						lpfc_disc_state_machine
							(vports[i], ndlp,
							 NULL,
							 NLP_EVT_DEVICE_RM);
				}
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	lpfc_sli_mbox_sys_shutdown(phba, mbx_action);

	if (phba->wq)
		flush_workqueue(phba->wq);
}

/**
 * lpfc_offline - Bring an HBA offline
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine actually brings an HBA offline. It stops all the timers
 * associated with the HBA, brings down the SLI layer, and eventually
 * marks the HBA as in offline state for the upper layer protocol.
 **/
void
lpfc_offline(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int i;

	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
		return;

	/* stop port and all timers associated with this hba */
	lpfc_stop_port(phba);

	/* Tear down the local and target port registrations. The
	 * nvme transports need to cleanup.
	 */
	lpfc_nvmet_destroy_targetport(phba);
	lpfc_nvme_destroy_localport(phba->pport);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_stop_vport_timers(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0460 Bring Adapter offline\n");
	/* Bring down the SLI Layer and cleanup. The HBA is offline now. */
	lpfc_sli_hba_down(phba);
	spin_lock_irq(&phba->hbalock);
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->work_port_events = 0;
			vports[i]->fc_flag |= FC_OFFLINE_MODE;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	/* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled
	 * in hba_unset
	 */
	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
		__lpfc_cpuhp_remove(phba);

	if (phba->cfg_xri_rebalancing)
		lpfc_destroy_multixri_pools(phba);
}

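/*
 * Illustrative sketch, not part of the driver: recovery paths cycle the
 * port with the prep/offline/online triplet, e.g. a hypothetical adapter
 * reset:
 *
 *	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
 *	lpfc_offline(phba);
 *	lpfc_sli_brdrestart(phba);
 *	lpfc_online(phba);
 *
 * lpfc_offline_prep() must always run before lpfc_offline() so that the
 * RPIs are released and the mailbox subsystem is shut down first.
 */
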
/**
 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine frees all the SCSI buffers and IOCBs from the driver
 * lists back to the kernel. It is called from lpfc_pci_remove_one to free
 * the internal resources before the device is removed from the system.
 **/
static void
lpfc_scsi_free(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *sb, *sb_next;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;

	spin_lock_irq(&phba->hbalock);

	/* Release all the lpfc_scsi_bufs maintained by this host. */

	spin_lock(&phba->scsi_buf_list_put_lock);
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
				 list) {
		list_del(&sb->list);
		dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}
	spin_unlock(&phba->scsi_buf_list_put_lock);

	spin_lock(&phba->scsi_buf_list_get_lock);
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
				 list) {
		list_del(&sb->list);
		dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}
	spin_unlock(&phba->scsi_buf_list_get_lock);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine frees all the IO buffers and IOCBs from the driver
 * lists back to the kernel. It is called from lpfc_pci_remove_one to free
 * the internal resources before the device is removed from the system.
 **/
void
lpfc_io_free(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
	struct lpfc_sli4_hdw_queue *qp;
	int idx;

	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];
		/* Release all the lpfc_nvme_bufs maintained by this host. */
		spin_lock(&qp->io_buf_list_put_lock);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &qp->lpfc_io_buf_list_put,
					 list) {
			list_del(&lpfc_ncmd->list);
			qp->put_io_bufs--;
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			if (phba->cfg_xpsgl && !phba->nvmet_support)
				lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
			lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
			kfree(lpfc_ncmd);
			qp->total_io_bufs--;
		}
		spin_unlock(&qp->io_buf_list_put_lock);

		spin_lock(&qp->io_buf_list_get_lock);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &qp->lpfc_io_buf_list_get,
					 list) {
			list_del(&lpfc_ncmd->list);
			qp->get_io_bufs--;
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			if (phba->cfg_xpsgl && !phba->nvmet_support)
				lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
			lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
			kfree(lpfc_ncmd);
			qp->total_io_bufs--;
		}
		spin_unlock(&qp->io_buf_list_get_lock);
	}
}

/**
 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the sizes of the current els and allocated
 * scsi sgl lists, and then goes through all sgls to update the physical
 * XRIs assigned due to port function reset. During port initialization, the
 * current els and allocated scsi sgl lists are 0s.
 *
 * Return codes
 *   0 - successful (for now, it always returns 0)
 **/
int
lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
	uint16_t i, lxri, xri_cnt, els_xri_cnt;
	LIST_HEAD(els_sgl_list);
	int rc;

	/*
	 * update on pci function's els xri-sgl list
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);

	if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl expanded */
		xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3157 ELS xri-sgl count increased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		/* allocate the additional els sgls */
		for (i = 0; i < xri_cnt; i++) {
			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
					     GFP_KERNEL);
			if (sglq_entry == NULL) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"2562 Failure to allocate an "
						"ELS sgl entry:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->buff_type = GEN_BUFF_TYPE;
			sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
							   &sglq_entry->phys);
			if (sglq_entry->virt == NULL) {
				kfree(sglq_entry);
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"2563 Failure to allocate an "
						"ELS mbuf:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->sgl = sglq_entry->virt;
			memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
			sglq_entry->state = SGL_FREED;
			list_add_tail(&sglq_entry->list, &els_sgl_list);
		}
		spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&els_sgl_list,
				 &phba->sli4_hba.lpfc_els_sgl_list);
		spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
	} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl shrunk */
		xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3158 ELS xri-sgl count decreased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
				 &els_sgl_list);
		/* release extra els sgls from list */
		for (i = 0; i < xri_cnt; i++) {
			list_remove_head(&els_sgl_list,
					 sglq_entry, struct lpfc_sglq, list);
			if (sglq_entry) {
				__lpfc_mbuf_free(phba, sglq_entry->virt,
						 sglq_entry->phys);
				kfree(sglq_entry);
			}
		}
		list_splice_init(&els_sgl_list,
				 &phba->sli4_hba.lpfc_els_sgl_list);
		spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
	} else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3163 ELS xri-sgl count unchanged: %d\n",
				els_xri_cnt);
	phba->sli4_hba.els_xri_cnt = els_xri_cnt;

	/* update xris to els sgls on the list */
	sglq_entry = NULL;
	sglq_entry_next = NULL;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &phba->sli4_hba.lpfc_els_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"2400 Failed to allocate xri for "
					"ELS sgl\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		sglq_entry->sli4_lxritag = lxri;
		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	return 0;

out_free_mem:
	lpfc_free_els_sgl_list(phba);
	return rc;
}

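/*
 * Illustrative summary, not part of the driver: the *_sgl_update routines
 * in this file all follow one pattern after a port function reset:
 *
 *	new_cnt = recompute the XRI budget for this sgl type;
 *	if (new_cnt > old_cnt)   allocate the additional sgl entries;
 *	if (new_cnt < old_cnt)   release the surplus entries;
 *	for each remaining entry rebind via lpfc_sli4_next_xritag();
 *
 * The rebind step is required because the physical XRI ids handed out by
 * the port are not stable across a function reset.
 */
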
/**
 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the sizes of the current els and allocated
 * scsi sgl lists, and then goes through all sgls to update the physical
 * XRIs assigned due to port function reset. During port initialization, the
 * current els and allocated scsi sgl lists are 0s.
 *
 * Return codes
 *   0 - successful (for now, it always returns 0)
 **/
int
lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
	uint16_t i, lxri, xri_cnt, els_xri_cnt;
	uint16_t nvmet_xri_cnt;
	LIST_HEAD(nvmet_sgl_list);
	int rc;

	/*
	 * update on pci function's nvmet xri-sgl list
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);

	/* For NVMET, ALL remaining XRIs are dedicated for IO processing */
	nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
	if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
		/* nvmet xri-sgl expanded */
		xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6302 NVMET xri-sgl cnt grew from %d to %d\n",
				phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
		/* allocate the additional nvmet sgls */
		for (i = 0; i < xri_cnt; i++) {
			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
					     GFP_KERNEL);
			if (sglq_entry == NULL) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"6303 Failure to allocate an "
						"NVMET sgl entry:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->buff_type = NVMET_BUFF_TYPE;
			sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
								&sglq_entry->phys);
			if (sglq_entry->virt == NULL) {
				kfree(sglq_entry);
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"6304 Failure to allocate an "
						"NVMET buf:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->sgl = sglq_entry->virt;
			memset(sglq_entry->sgl, 0,
			       phba->cfg_sg_dma_buf_size);
			sglq_entry->state = SGL_FREED;
			list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
		}
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&nvmet_sgl_list,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
		/* nvmet xri-sgl shrunk */
		xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6305 NVMET xri-sgl count decreased from "
				"%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
				nvmet_xri_cnt);
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
				 &nvmet_sgl_list);
		/* release extra nvmet sgls from list */
		for (i = 0; i < xri_cnt; i++) {
			list_remove_head(&nvmet_sgl_list,
					 sglq_entry, struct lpfc_sglq, list);
			if (sglq_entry) {
				lpfc_nvmet_buf_free(phba, sglq_entry->virt,
						    sglq_entry->phys);
				kfree(sglq_entry);
			}
		}
		list_splice_init(&nvmet_sgl_list,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6306 NVMET xri-sgl count unchanged: %d\n",
				nvmet_xri_cnt);
	phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;

	/* update xris to nvmet sgls on the list */
	sglq_entry = NULL;
	sglq_entry_next = NULL;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"6307 Failed to allocate xri for "
					"NVMET sgl\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		sglq_entry->sli4_lxritag = lxri;
		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	return 0;

out_free_mem:
	lpfc_free_nvmet_sgl_list(phba);
	return rc;
}

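/**
 * lpfc_io_buf_flush - Collect all IO buffers onto one XRI-sorted list
 * @phba: pointer to lpfc hba data structure.
 * @cbuf: list head that receives the buffers.
 *
 * Empties the get and put lists of every hardware queue onto @cbuf in
 * ascending XRI order, so that a subsequent SGL block post can hand the
 * firmware a sequential range of XRIs.
 *
 * Return: number of IO buffers collected.
 **/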
int
lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
{
	LIST_HEAD(blist);
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_io_buf *iobufp, *prev_iobufp;
	int idx, cnt, xri, inserted;

	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];
		spin_lock_irq(&qp->io_buf_list_get_lock);
		spin_lock(&qp->io_buf_list_put_lock);

		/* Take everything off the get and put lists */
		list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
		list_splice(&qp->lpfc_io_buf_list_put, &blist);
		INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
		INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
		cnt += qp->get_io_bufs + qp->put_io_bufs;
		qp->get_io_bufs = 0;
		qp->put_io_bufs = 0;
		qp->total_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock_irq(&qp->io_buf_list_get_lock);
	}

	/*
	 * Take IO buffers off blist and put on cbuf sorted by XRI.
	 * This is because POST_SGL takes a sequential range of XRIs
	 * to post to the firmware.
	 */
	for (idx = 0; idx < cnt; idx++) {
		list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
		if (!lpfc_cmd)
			return cnt;
		if (idx == 0) {
			list_add_tail(&lpfc_cmd->list, cbuf);
			continue;
		}
		xri = lpfc_cmd->cur_iocbq.sli4_xritag;
		inserted = 0;
		prev_iobufp = NULL;
		list_for_each_entry(iobufp, cbuf, list) {
			if (xri < iobufp->cur_iocbq.sli4_xritag) {
				if (prev_iobufp)
					list_add(&lpfc_cmd->list,
						 &prev_iobufp->list);
				else
					list_add(&lpfc_cmd->list, cbuf);
				inserted = 1;
				break;
			}
			prev_iobufp = iobufp;
		}
		if (!inserted)
			list_add_tail(&lpfc_cmd->list, cbuf);
	}
	return cnt;
}

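/**
 * lpfc_io_buf_replenish - Redistribute IO buffers across hardware queues
 * @phba: pointer to lpfc hba data structure.
 * @cbuf: list of IO buffers to distribute.
 *
 * Hands the buffers on @cbuf back to the per-HWQ put lists in round-robin
 * order, rebinding each buffer's hdwq assignment along the way.
 *
 * Return: number of IO buffers distributed.
 **/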
int
lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd;
	int idx, cnt;
	unsigned long iflags;

	qp = phba->sli4_hba.hdwq;
	cnt = 0;
	while (!list_empty(cbuf)) {
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			list_remove_head(cbuf, lpfc_cmd,
					 struct lpfc_io_buf, list);
			if (!lpfc_cmd)
				return cnt;
			cnt++;
			qp = &phba->sli4_hba.hdwq[idx];
			lpfc_cmd->hdwq_no = idx;
			lpfc_cmd->hdwq = qp;
			lpfc_cmd->cur_iocbq.cmd_cmpl = NULL;
			spin_lock_irqsave(&qp->io_buf_list_put_lock, iflags);
			list_add_tail(&lpfc_cmd->list,
				      &qp->lpfc_io_buf_list_put);
			qp->put_io_bufs++;
			qp->total_io_bufs++;
			spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
					       iflags);
		}
	}
	return cnt;
}

/**
 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the sizes of the current els and allocated
 * scsi sgl lists, and then goes through all sgls to update the physical
 * XRIs assigned due to port function reset. During port initialization, the
 * current els and allocated scsi sgl lists are 0s.
 *
 * Return codes
 *   0 - successful (for now, it always returns 0)
 **/
int
lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
	uint16_t i, lxri, els_xri_cnt;
	uint16_t io_xri_cnt, io_xri_max;
	LIST_HEAD(io_sgl_list);
	int rc, cnt;

	/*
	 * update on pci function's allocated nvme xri-sgl list
	 */

	/* maximum number of xris available for nvme buffers */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
	io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
	phba->sli4_hba.io_xri_max = io_xri_max;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"6074 Current allocated XRI sgl count:%d, "
			"maximum XRI count:%d els_xri_cnt:%d\n",
			phba->sli4_hba.io_xri_cnt,
			phba->sli4_hba.io_xri_max,
			els_xri_cnt);

	cnt = lpfc_io_buf_flush(phba, &io_sgl_list);

	if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
		/* max nvme xri shrunk below the allocated nvme buffers */
		io_xri_cnt = phba->sli4_hba.io_xri_cnt -
					phba->sli4_hba.io_xri_max;
		/* release the extra allocated nvme buffers */
		for (i = 0; i < io_xri_cnt; i++) {
			list_remove_head(&io_sgl_list, lpfc_ncmd,
					 struct lpfc_io_buf, list);
			if (lpfc_ncmd) {
				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
					      lpfc_ncmd->data,
					      lpfc_ncmd->dma_handle);
				kfree(lpfc_ncmd);
			}
		}
		phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
	}

	/* update xris associated to remaining allocated nvme buffers */
	lpfc_ncmd = NULL;
	lpfc_ncmd_next = NULL;
	phba->sli4_hba.io_xri_cnt = cnt;
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &io_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"6075 Failed to allocate xri for "
					"nvme buffer\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
		lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
	return 0;

out_free_mem:
	lpfc_io_free(phba);
	return rc;
}

/**
 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
 * @phba: Pointer to lpfc hba data structure.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates nvme buffers for a device with the SLI-4 interface
 * spec; an nvme buffer contains all the information needed to initiate
 * an I/O. After allocating up to @num_to_alloc IO buffers and putting them
 * on a list, it posts them to the port by using an SGL block post.
 *
 * Return codes:
 *   int - number of IO buffers that were allocated and posted.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
int
lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_iocbq *pwqeq;
	uint16_t iotag, lxri = 0;
	int bcnt, num_posted;
	LIST_HEAD(prep_nblist);
	LIST_HEAD(post_nblist);
	LIST_HEAD(nvme_nblist);

	phba->sli4_hba.io_xri_cnt = 0;
	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
		if (!lpfc_ncmd)
			break;
		/*
		 * Get memory from the pci pool to map the virt space to
		 * pci bus space for an I/O. The DMA buffer includes the
		 * number of SGE's necessary to support the sg_tablesize.
		 */
		lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
						  GFP_KERNEL,
						  &lpfc_ncmd->dma_handle);
		if (!lpfc_ncmd->data) {
			kfree(lpfc_ncmd);
			break;
		}

		if (phba->cfg_xpsgl && !phba->nvmet_support) {
			INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
		} else {
			/*
			 * 4K Page alignment is CRITICAL to BlockGuard, double
			 * check to be sure.
			 */
			if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    (((unsigned long)(lpfc_ncmd->data) &
			    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"3369 Memory alignment err: "
						"addr=%lx\n",
						(unsigned long)lpfc_ncmd->data);
				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
					      lpfc_ncmd->data,
					      lpfc_ncmd->dma_handle);
				kfree(lpfc_ncmd);
				break;
			}
		}

		INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);

		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			kfree(lpfc_ncmd);
			break;
		}
		pwqeq = &lpfc_ncmd->cur_iocbq;

		/* Allocate iotag for lpfc_ncmd->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, pwqeq);
		if (iotag == 0) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			kfree(lpfc_ncmd);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6121 Failed to allocate IOTAG for"
					" XRI:0x%x\n", lxri);
			lpfc_sli4_free_xri(phba, lxri);
			break;
		}
		pwqeq->sli4_lxritag = lxri;
		pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];

		/* Initialize local short-hand pointers. */
		lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
		lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
		lpfc_ncmd->cur_iocbq.io_buf = lpfc_ncmd;
		spin_lock_init(&lpfc_ncmd->buf_lock);

		/* add the nvme buffer to a post list */
		list_add_tail(&lpfc_ncmd->list, &post_nblist);
		phba->sli4_hba.io_xri_cnt++;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6114 Allocated %d out of %d requested new NVME "
			"buffers of size x%zu bytes\n", bcnt, num_to_alloc,
			sizeof(*lpfc_ncmd));

	/* post the list of nvme buffer sgls to port if available */
	if (!list_empty(&post_nblist))
		num_posted = lpfc_sli4_post_io_sgl_list(
				phba, &post_nblist, bcnt);
	else
		num_posted = 0;

	return num_posted;
}

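/*
 * Illustrative usage, not part of the driver: a setup path would size the
 * request by the XRIs left over after ELS, e.g.
 *
 *	int posted = lpfc_new_io_buf(phba, phba->sli4_hba.io_xri_max);
 *
 * where posted < io_xri_max indicates a partial allocation and 0 means
 * nothing could be allocated and posted.
 */
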
static uint64_t
lpfc_get_wwpn(struct lpfc_hba *phba)
{
	uint64_t wwn;
	int rc;
	LPFC_MBOXQ_t *mboxq;
	MAILBOX_t *mb;

	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
					       GFP_KERNEL);
	if (!mboxq)
		return (uint64_t)-1;

	/* First get WWN of HBA instance */
	lpfc_read_nv(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6019 Mailbox failed, mbxCmd x%x "
				"READ_NV, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return (uint64_t)-1;
	}
	mb = &mboxq->u.mb;
	memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
	/* wwn is WWPN of HBA instance */
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (phba->sli_rev == LPFC_SLI_REV4)
		return be64_to_cpu(wwn);
	else
		return rol64(wwn, 32);
}

static unsigned short lpfc_get_sg_tablesize(struct lpfc_hba *phba)
{
	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (phba->cfg_xpsgl && !phba->nvmet_support)
			return LPFC_MAX_SG_TABLESIZE;
		else
			return phba->cfg_scsi_seg_cnt;
	} else {
		return phba->cfg_sg_seg_cnt;
	}
}

/**
 * lpfc_vmid_res_alloc - Allocates resources for VMID
 * @phba: pointer to lpfc hba data structure.
 * @vport: pointer to vport data structure
 *
 * This routine allocates the resources needed for the VMID.
 *
 * Return codes
 *   0 on Success
 *   Non-0 on Failure
 */
static int
lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	/* VMID feature is supported only on SLI4 */
	if (phba->sli_rev == LPFC_SLI_REV3) {
		phba->cfg_vmid_app_header = 0;
		phba->cfg_vmid_priority_tagging = 0;
	}

	if (lpfc_is_vmid_enabled(phba)) {
		vport->vmid =
		    kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid),
			    GFP_KERNEL);
		if (!vport->vmid)
			return -ENOMEM;

		rwlock_init(&vport->vmid_lock);

		/* Set the VMID parameters for the vport */
		vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging;
		vport->vmid_inactivity_timeout =
		    phba->cfg_vmid_inactivity_timeout;
		vport->max_vmid = phba->cfg_max_vmid;
		vport->cur_vmid_cnt = 0;

		vport->vmid_priority_range = bitmap_zalloc
			(LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL);

		if (!vport->vmid_priority_range) {
			kfree(vport->vmid);
			return -ENOMEM;
		}

		hash_init(vport->hash_table);
	}
	return 0;
}

/**
 * lpfc_create_port - Create an FC port
 * @phba: pointer to lpfc hba data structure.
 * @instance: a unique integer ID to this FC port.
 * @dev: pointer to the device data structure.
 *
 * This routine creates an FC port for the upper layer protocol. The FC port
 * can be created on top of either a physical port or a virtual port provided
 * by the HBA. This routine also allocates a SCSI host data structure (shost)
 * and associates the FC port with it before adding the shost to the SCSI
 * layer.
 *
 * Return codes
 *   @vport - pointer to the virtual N_Port data structure.
 *   NULL - port create failed.
 **/
struct lpfc_vport *
lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
	struct lpfc_vport *vport;
	struct Scsi_Host *shost = NULL;
	struct scsi_host_template *template;
	int error = 0;
	int i;
	uint64_t wwn;
	bool use_no_reset_hba = false;
	int rc;

	if (lpfc_no_hba_reset_cnt) {
		if (phba->sli_rev < LPFC_SLI_REV4 &&
		    dev == &phba->pcidev->dev) {
			/* Reset the port first */
			lpfc_sli_brdrestart(phba);
			rc = lpfc_sli_chipset_init(phba);
			if (rc)
				return NULL;
		}
		wwn = lpfc_get_wwpn(phba);
	}

	for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
		if (wwn == lpfc_no_hba_reset[i]) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"6020 Setting use_no_reset port=%llx\n",
					wwn);
			use_no_reset_hba = true;
			break;
		}
	}

	/* Seed template for SCSI host registration */
	if (dev == &phba->pcidev->dev) {
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
			/* Seed physical port template */
			template = &lpfc_template;

			if (use_no_reset_hba)
				/* template is for a no reset SCSI Host */
				template->eh_host_reset_handler = NULL;

			/* Seed updated value of sg_tablesize */
			template->sg_tablesize = lpfc_get_sg_tablesize(phba);
		} else {
			/* NVMET is for physical port only */
			template = &lpfc_template_nvme;
		}
	} else {
		/* Seed vport template */
		template = &lpfc_vport_template;

		/* Seed updated value of sg_tablesize */
		template->sg_tablesize = lpfc_get_sg_tablesize(phba);
	}

	shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
	if (!shost)
		goto out;

	vport = (struct lpfc_vport *) shost->hostdata;
	vport->phba = phba;
	vport->load_flag |= FC_LOADING;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	vport->fc_rscn_flush = 0;
	lpfc_get_vport_cfgparam(vport);

	/* Adjust value in vport */
	vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;

	shost->unique_id = instance;
	shost->max_id = LPFC_MAX_TARGET;
	shost->max_lun = vport->cfg_max_luns;
	shost->this_id = -1;
	shost->max_cmd_len = 16;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (!phba->cfg_fcp_mq_threshold ||
		    phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
			phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;

		shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
					    phba->cfg_fcp_mq_threshold);

		shost->dma_boundary =
			phba->sli4_hba.pc_sli4_params.sge_supp_len - 1;
	} else
		/* SLI-3 has a limited number of hardware queues (3),
		 * thus there is only one for FCP processing.
		 */
		shost->nr_hw_queues = 1;

	/*
	 * Set initial can_queue value since 0 is no longer supported and
	 * scsi_add_host will fail. This will be adjusted later based on the
	 * max xri value determined in hba setup.
	 */
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (dev != &phba->pcidev->dev) {
		shost->transportt = lpfc_vport_transport_template;
		vport->port_type = LPFC_NPIV_PORT;
	} else {
		shost->transportt = lpfc_transport_template;
		vport->port_type = LPFC_PHYSICAL_PORT;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9081 CreatePort TMPLATE type %x TBLsize %d "
			"SEGcnt %d/%d\n",
			vport->port_type, shost->sg_tablesize,
			phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);

	/* Allocate the resources for VMID */
	rc = lpfc_vmid_res_alloc(phba, vport);

	if (rc)
		goto out_put_shost;

	/* Initialize all internally managed lists. */
	INIT_LIST_HEAD(&vport->fc_nodes);
	INIT_LIST_HEAD(&vport->rcv_buffer_list);
	spin_lock_init(&vport->work_port_lock);

	timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);

	timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);

	timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);

	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
		lpfc_setup_bg(phba, shost);

	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
	if (error)
		goto out_free_vmid;

	spin_lock_irq(&phba->port_list_lock);
	list_add_tail(&vport->listentry, &phba->port_list);
	spin_unlock_irq(&phba->port_list_lock);
	return vport;

out_free_vmid:
	kfree(vport->vmid);
	bitmap_free(vport->vmid_priority_range);
out_put_shost:
	scsi_host_put(shost);
out:
	return NULL;
}

/**
 * destroy_port - destroy an FC port
 * @vport: pointer to an lpfc virtual N_Port data structure.
 *
 * This routine destroys an FC port from the upper layer protocol. All the
 * resources associated with the port are released.
 **/
void
destroy_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;

	lpfc_debugfs_terminate(vport);
	fc_remove_host(shost);
	scsi_remove_host(shost);

	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	lpfc_cleanup(vport);
	return;
}

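/*
 * Illustrative pairing, not part of the driver: every successful
 * lpfc_create_port() is eventually balanced by destroy_port(), e.g.
 *
 *	vport = lpfc_create_port(phba, lpfc_get_instance(), dev);
 *	if (!vport)
 *		return NULL;
 *	...
 *	destroy_port(vport);
 *
 * destroy_port() detaches the shost from the FC transport and the SCSI
 * midlayer before lpfc_cleanup() releases the remaining vport state.
 */
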
/**
 * lpfc_get_instance - Get a unique integer ID
 *
 * This routine allocates a unique integer ID from the lpfc_hba_index pool.
 * It uses the kernel idr facility to perform the task.
 *
 * Return codes:
 *   instance - a unique integer ID allocated as the new instance.
 *   -1 - lpfc get instance failed.
 **/
int
lpfc_get_instance(void)
{
	int ret;

	ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
	return ret < 0 ? -1 : ret;
}

/**
 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
 * @shost: pointer to SCSI host data structure.
 * @time: elapsed time of the scan in jiffies.
 *
 * This routine is called by the SCSI layer with a SCSI host to determine
 * whether the host scan has finished.
 *
 * Note: there is no scan_start function as adapter initialization will have
 * asynchronously kicked off the link initialization.
 *
 * Return codes
 *   0 - SCSI host scan is not over yet.
 *   1 - SCSI host scan is over.
 **/
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int stat = 0;

	spin_lock_irq(shost->host_lock);

	if (vport->load_flag & FC_UNLOADING) {
		stat = 1;
		goto finished;
	}
	if (time >= msecs_to_jiffies(30 * 1000)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0461 Scanning longer than 30 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}
	if (time >= msecs_to_jiffies(15 * 1000) &&
	    phba->link_state <= LPFC_LINK_DOWN) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0465 Link down longer than 15 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}

	if (vport->port_state != LPFC_VPORT_READY)
		goto finished;
	if (vport->num_disc_nodes || vport->fc_prli_sent)
		goto finished;
	if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
		goto finished;
	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
		goto finished;

	stat = 1;

finished:
	spin_unlock_irq(shost->host_lock);
	return stat;
}

static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	fc_host_supported_speeds(shost) = 0;
	/*
	 * Avoid reporting supported link speed for FCoE as it can't be
	 * controlled via FCoE.
	 */
	if (phba->hba_flag & HBA_FCOE_MODE)
		return;

	if (phba->lmt & LMT_256Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT;
	if (phba->lmt & LMT_128Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
	if (phba->lmt & LMT_64Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
	if (phba->lmt & LMT_32Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
	if (phba->lmt & LMT_16Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
	if (phba->lmt & LMT_10Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
	if (phba->lmt & LMT_8Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
	if (phba->lmt & LMT_4Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
	if (phba->lmt & LMT_2Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
	if (phba->lmt & LMT_1Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
}

/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes on an FC port
 * @shost: pointer to SCSI host data structure.
 *
 * This routine initializes the SCSI host attributes for a given FC port.
 * The SCSI host can be either on top of a physical port or a virtual port.
 **/
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	/*
	 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
	 */

	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_supported_classes(shost) = FC_COS_CLASS3;

	memset(fc_host_supported_fc4s(shost), 0,
	       sizeof(fc_host_supported_fc4s(shost)));
	fc_host_supported_fc4s(shost)[2] = 1;
	fc_host_supported_fc4s(shost)[7] = 1;

	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
				      sizeof(fc_host_symbolic_name(shost)));

	lpfc_host_supported_speeds_set(shost);

	fc_host_maxframe_size(shost) =
		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;

	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(shost), 0,
	       sizeof(fc_host_active_fc4s(shost)));
	fc_host_active_fc4s(shost)[2] = 1;
	fc_host_active_fc4s(shost)[7] = 1;

	fc_host_max_npiv_vports(shost) = phba->max_vpi;
	spin_lock_irq(shost->host_lock);
	vport->load_flag &= ~FC_LOADING;
	spin_unlock_irq(shost->host_lock);
}

/**
 * lpfc_stop_port_s3 - Stop SLI3 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI3 device port. It stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s3(struct lpfc_hba *phba)
{
	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	/* Clear all pending interrupts */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */

	/* Reset some HBA SLI setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
}

/**
 * lpfc_stop_port_s4 - Stop SLI4 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI4 device port. It stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s4(struct lpfc_hba *phba)
{
	/* Reset some HBA SLI4 setup states */
	lpfc_stop_hba_timers(phba);
	if (phba->pport)
		phba->pport->work_port_events = 0;
	phba->sli4_hba.intr_enable = 0;
}

/**
 * lpfc_stop_port - Wrapper function for stopping hba port
 * @phba: Pointer to HBA context object.
 *
 * This routine dispatches to the actual SLI3 or SLI4 stop-port routine
 * through the API jump table function pointer in the lpfc_hba struct.
 **/
void
lpfc_stop_port(struct lpfc_hba *phba)
{
	phba->lpfc_stop_port(phba);

	if (phba->wq)
		flush_workqueue(phba->wq);
}

/**
 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
 * @phba: Pointer to hba for which this call is being executed.
 *
 * This routine starts the timer waiting for the FCF rediscovery to complete.
 **/
void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
	unsigned long fcf_redisc_wait_tmo =
		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
	/* Start fcf rediscovery wait period timer */
	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
	spin_lock_irq(&phba->hbalock);
	/* Allow action to new fcf asynchronous event */
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	/* Mark the FCF rediscovery pending state */
	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
	spin_unlock_irq(&phba->hbalock);
}

5120 /**
5121 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
5122 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
5123 *
5124 * This routine is invoked when waiting for FCF table rediscover has been
5125 * timed out. If new FCF record(s) has (have) been discovered during the
5126 * wait period, a new FCF event shall be added to the FCOE async event
5127 * list, and then worker thread shall be waked up for processing from the
5128 * worker thread context.
5129 **/
5130 static void
lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list * t)5131 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
5132 {
5133 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
5134
5135 /* Don't send FCF rediscovery event if timer cancelled */
5136 spin_lock_irq(&phba->hbalock);
5137 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
5138 spin_unlock_irq(&phba->hbalock);
5139 return;
5140 }
5141 /* Clear FCF rediscovery timer pending flag */
5142 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
5143 /* FCF rediscovery event to worker thread */
5144 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
5145 spin_unlock_irq(&phba->hbalock);
5146 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
5147 "2776 FCF rediscover quiescent timer expired\n");
5148 /* wake up worker thread */
5149 lpfc_worker_wake_up(phba);
5150 }

/**
 * lpfc_vmid_poll - VMID timeout detection
 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This routine is invoked when there has been no I/O from a VM for the
 * specified amount of time. When this situation is detected, the VMID has
 * to be deregistered from the switch and all the local resources freed. The
 * VMID will be reassigned to the VM once the I/O begins.
 **/
static void
lpfc_vmid_poll(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
	u32 wake_up = 0;

	/* check if there is a need to issue QFPA */
	if (phba->pport->vmid_priority_tagging) {
		wake_up = 1;
		phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
	}

	/* Is the vmid inactivity timer enabled? */
	if (phba->pport->vmid_inactivity_timeout ||
	    phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
		wake_up = 1;
		phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
	}

	if (wake_up)
		lpfc_worker_wake_up(phba);

	/* restart the timer for the next iteration */
	mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
							LPFC_VMID_TIMER));
}

/**
 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link-attention link fault code.
 **/
static void
lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
			   struct lpfc_acqe_link *acqe_link)
{
	switch (bf_get(lpfc_acqe_fc_la_att_type, acqe_link)) {
	case LPFC_FC_LA_TYPE_LINK_DOWN:
	case LPFC_FC_LA_TYPE_TRUNKING_EVENT:
	case LPFC_FC_LA_TYPE_ACTIVATE_FAIL:
	case LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT:
		break;
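	/* For all other attention types, validate the reported fault code */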
	default:
		switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
		case LPFC_ASYNC_LINK_FAULT_NONE:
		case LPFC_ASYNC_LINK_FAULT_LOCAL:
		case LPFC_ASYNC_LINK_FAULT_REMOTE:
		case LPFC_ASYNC_LINK_FAULT_LR_LRR:
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0398 Unknown link fault code: x%x\n",
					bf_get(lpfc_acqe_link_fault, acqe_link));
			break;
		}
		break;
	}
}

/**
 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link attention type and translate it
 * into the base driver's link attention type coding.
 *
 * Return: Link attention type in terms of base driver's coding.
 **/
static uint8_t
lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
			  struct lpfc_acqe_link *acqe_link)
{
	uint8_t att_type;

	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
	case LPFC_ASYNC_LINK_STATUS_DOWN:
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
		att_type = LPFC_ATT_LINK_DOWN;
		break;
	case LPFC_ASYNC_LINK_STATUS_UP:
		/* Ignore physical link up events - wait for logical link up */
		att_type = LPFC_ATT_RESERVED;
		break;
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
		att_type = LPFC_ATT_LINK_UP;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0399 Invalid link attention type: x%x\n",
				bf_get(lpfc_acqe_link_status, acqe_link));
		att_type = LPFC_ATT_RESERVED;
		break;
	}
	return att_type;
}

/**
 * lpfc_sli_port_speed_get - Get link speed from the sli3 link speed code
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get an SLI3 FC port's link speed in Mbps.
 *
 * Return: link speed in terms of Mbps.
 **/
uint32_t
lpfc_sli_port_speed_get(struct lpfc_hba *phba)
{
	uint32_t link_speed;

	if (!lpfc_is_link_up(phba))
		return 0;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		switch (phba->fc_linkspeed) {
		case LPFC_LINK_SPEED_1GHZ:
			link_speed = 1000;
			break;
		case LPFC_LINK_SPEED_2GHZ:
			link_speed = 2000;
			break;
		case LPFC_LINK_SPEED_4GHZ:
			link_speed = 4000;
			break;
		case LPFC_LINK_SPEED_8GHZ:
			link_speed = 8000;
			break;
		case LPFC_LINK_SPEED_10GHZ:
			link_speed = 10000;
			break;
		case LPFC_LINK_SPEED_16GHZ:
			link_speed = 16000;
			break;
		default:
			link_speed = 0;
		}
	} else {
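		/* SLI4: prefer the logical link speed when the port reports one */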
		if (phba->sli4_hba.link_state.logical_speed)
			link_speed =
			      phba->sli4_hba.link_state.logical_speed;
		else
			link_speed = phba->sli4_hba.link_state.speed;
	}
	return link_speed;
}

/**
 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
 * @phba: pointer to lpfc hba data structure.
 * @evt_code: asynchronous event code.
 * @speed_code: asynchronous event link speed code.
 *
 * This routine is to parse the given SLI4 async event link speed code into
 * a link speed value in Mbps.
 *
 * Return: link speed in terms of Mbps.
 **/
static uint32_t
lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
			   uint8_t speed_code)
{
	uint32_t port_speed;

	switch (evt_code) {
	case LPFC_TRAILER_CODE_LINK:
		switch (speed_code) {
		case LPFC_ASYNC_LINK_SPEED_ZERO:
			port_speed = 0;
			break;
		case LPFC_ASYNC_LINK_SPEED_10MBPS:
			port_speed = 10;
			break;
		case LPFC_ASYNC_LINK_SPEED_100MBPS:
			port_speed = 100;
			break;
		case LPFC_ASYNC_LINK_SPEED_1GBPS:
			port_speed = 1000;
			break;
		case LPFC_ASYNC_LINK_SPEED_10GBPS:
			port_speed = 10000;
			break;
		case LPFC_ASYNC_LINK_SPEED_20GBPS:
			port_speed = 20000;
			break;
		case LPFC_ASYNC_LINK_SPEED_25GBPS:
			port_speed = 25000;
			break;
		case LPFC_ASYNC_LINK_SPEED_40GBPS:
			port_speed = 40000;
			break;
		case LPFC_ASYNC_LINK_SPEED_100GBPS:
			port_speed = 100000;
			break;
		default:
			port_speed = 0;
		}
		break;
	case LPFC_TRAILER_CODE_FC:
		switch (speed_code) {
		case LPFC_FC_LA_SPEED_UNKNOWN:
			port_speed = 0;
			break;
		case LPFC_FC_LA_SPEED_1G:
			port_speed = 1000;
			break;
		case LPFC_FC_LA_SPEED_2G:
			port_speed = 2000;
			break;
		case LPFC_FC_LA_SPEED_4G:
			port_speed = 4000;
			break;
		case LPFC_FC_LA_SPEED_8G:
			port_speed = 8000;
			break;
		case LPFC_FC_LA_SPEED_10G:
			port_speed = 10000;
			break;
		case LPFC_FC_LA_SPEED_16G:
			port_speed = 16000;
			break;
		case LPFC_FC_LA_SPEED_32G:
			port_speed = 32000;
			break;
		case LPFC_FC_LA_SPEED_64G:
			port_speed = 64000;
			break;
		case LPFC_FC_LA_SPEED_128G:
			port_speed = 128000;
			break;
		case LPFC_FC_LA_SPEED_256G:
			port_speed = 256000;
			break;
		default:
			port_speed = 0;
		}
		break;
	default:
		port_speed = 0;
	}
	return port_speed;
}

/**
 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE link event.
 **/
static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_link *acqe_link)
{
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	uint8_t att_type;
	int rc;

	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
		return;
	phba->fcoe_eventtag = acqe_link->event_tag;
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0395 The mboxq allocation failed\n");
		return;
	}

	rc = lpfc_mbox_rsrc_prep(phba, pmb);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0396 mailbox allocation failed\n");
		goto out_free_pmb;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have processed the link event */
	phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
				bf_get(lpfc_acqe_link_speed, acqe_link));
	phba->sli4_hba.link_state.duplex =
				bf_get(lpfc_acqe_link_duplex, acqe_link);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_link_status, acqe_link);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_link_type, acqe_link);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_link_number, acqe_link);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_link);
	phba->sli4_hba.link_state.logical_speed =
			bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2900 Async FC/FCoE Link event - Speed:%dGBit "
			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
			"Logical speed:%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	/*
	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
	 * topology info. Note: Optional for non FC-AL ports.
	 */
	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			goto out_free_pmb;
		return;
	}
	/*
	 * For FCoE Mode: fill in all the topology information we need and call
	 * the READ_TOPOLOGY completion routine to continue without actually
	 * sending the READ_TOPOLOGY mailbox command to the port.
	 */
	/* Initialize completion status */
	mb = &pmb->u.mb;
	mb->mbxStatus = MBX_SUCCESS;

	/* Parse port fault information field */
	lpfc_sli4_parse_latt_fault(phba, acqe_link);

	/* Parse and translate link attention fields */
	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
	la->eventTag = acqe_link->event_tag;
	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
	bf_set(lpfc_mbx_read_top_link_spd, la,
	       (bf_get(lpfc_acqe_link_speed, acqe_link)));

	/* Fake the following irrelevant fields */
	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
	bf_set(lpfc_mbx_read_top_il, la, 0);
	bf_set(lpfc_mbx_read_top_pb, la, 0);
	bf_set(lpfc_mbx_read_top_fa, la, 0);
	bf_set(lpfc_mbx_read_top_mm, la, 0);

	/* Invoke the lpfc_handle_latt mailbox command callback function */
	lpfc_mbx_cmpl_read_topology(phba, pmb);

	return;

out_free_pmb:
	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}

/**
 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
 * topology.
 * @phba: pointer to lpfc hba data structure.
 * @speed_code: asynchronous event link speed code.
 *
 * This routine is to parse the given SLI4 async event link speed code into
 * a Read topology link speed value.
 *
 * Return: link speed in terms of Read topology.
 **/
static uint8_t
lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
{
	uint8_t port_speed;

	switch (speed_code) {
	case LPFC_FC_LA_SPEED_1G:
		port_speed = LPFC_LINK_SPEED_1GHZ;
		break;
	case LPFC_FC_LA_SPEED_2G:
		port_speed = LPFC_LINK_SPEED_2GHZ;
		break;
	case LPFC_FC_LA_SPEED_4G:
		port_speed = LPFC_LINK_SPEED_4GHZ;
		break;
	case LPFC_FC_LA_SPEED_8G:
		port_speed = LPFC_LINK_SPEED_8GHZ;
		break;
	case LPFC_FC_LA_SPEED_16G:
		port_speed = LPFC_LINK_SPEED_16GHZ;
		break;
	case LPFC_FC_LA_SPEED_32G:
		port_speed = LPFC_LINK_SPEED_32GHZ;
		break;
	case LPFC_FC_LA_SPEED_64G:
		port_speed = LPFC_LINK_SPEED_64GHZ;
		break;
	case LPFC_FC_LA_SPEED_128G:
		port_speed = LPFC_LINK_SPEED_128GHZ;
		break;
	case LPFC_FC_LA_SPEED_256G:
		port_speed = LPFC_LINK_SPEED_256GHZ;
		break;
	default:
		port_speed = 0;
		break;
	}

	return port_speed;
}

void
lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
{
	if (!phba->rx_monitor) {
		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"4411 Rx Monitor Info is empty.\n");
	} else {
		lpfc_rx_monitor_report(phba, phba->rx_monitor, NULL, 0,
				       LPFC_MAX_RXMONITOR_DUMP);
	}
}

/**
 * lpfc_cgn_update_stat - Save data into congestion stats buffer
 * @phba: pointer to lpfc hba data structure.
 * @dtag: FPIN descriptor received
 *
 * Increment the FPIN received counter/time when it happens.
 */
void
lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
{
	struct lpfc_cgn_info *cp;
	u32 value;

	/* Make sure we have a congestion info buffer */
	if (!phba->cgn_i)
		return;
	cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;

	/* Update congestion statistics */
	switch (dtag) {
	case ELS_DTAG_LNK_INTEGRITY:
		le32_add_cpu(&cp->link_integ_notification, 1);
		lpfc_cgn_update_tstamp(phba, &cp->stat_lnk);
		break;
	case ELS_DTAG_DELIVERY:
		le32_add_cpu(&cp->delivery_notification, 1);
		lpfc_cgn_update_tstamp(phba, &cp->stat_delivery);
		break;
	case ELS_DTAG_PEER_CONGEST:
		le32_add_cpu(&cp->cgn_peer_notification, 1);
		lpfc_cgn_update_tstamp(phba, &cp->stat_peer);
		break;
	case ELS_DTAG_CONGESTION:
		le32_add_cpu(&cp->cgn_notification, 1);
		lpfc_cgn_update_tstamp(phba, &cp->stat_fpin);
	}
	if (phba->cgn_fpin_frequency &&
	    phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
		value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
		cp->cgn_stat_npm = value;
	}
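	/* Recompute the CRC over the congestion info block after updating it */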
	value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
				    LPFC_CGN_CRC32_SEED);
	cp->cgn_info_crc = cpu_to_le32(value);
}

/**
 * lpfc_cgn_update_tstamp - Update cmf timestamp
 * @phba: pointer to lpfc hba data structure.
 * @ts: structure to write the timestamp to.
 */
void
lpfc_cgn_update_tstamp(struct lpfc_hba *phba, struct lpfc_cgn_ts *ts)
{
	struct timespec64 cur_time;
	struct tm tm_val;

	ktime_get_real_ts64(&cur_time);
	time64_to_tm(cur_time.tv_sec, 0, &tm_val);

	ts->month = tm_val.tm_mon + 1;
	ts->day = tm_val.tm_mday;
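	/* tm_year counts from 1900; store the year as an offset from 2000 */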
	ts->year = tm_val.tm_year - 100;
	ts->hour = tm_val.tm_hour;
	ts->minute = tm_val.tm_min;
	ts->second = tm_val.tm_sec;

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"2646 Updated CMF timestamp : "
			"%u/%u/%u %u:%u:%u\n",
			ts->day, ts->month,
			ts->year, ts->hour,
			ts->minute, ts->second);
}

/**
 * lpfc_cmf_stats_timer - Save data into registered congestion buffer
 * @timer: Timer cookie to access lpfc private data
 *
 * Save the congestion event data every minute.
 * On the hour, collapse all the minute data into hour data. Every day,
 * collapse all the hour data into daily data. Separate driver
 * and fabric congestion event counters that will be saved out
 * to the registered congestion buffer every minute.
 */
static enum hrtimer_restart
lpfc_cmf_stats_timer(struct hrtimer *timer)
{
	struct lpfc_hba *phba;
	struct lpfc_cgn_info *cp;
	uint32_t i, index;
	uint16_t value, mvalue;
	uint64_t bps;
	uint32_t mbps;
	uint32_t dvalue, wvalue, lvalue, avalue;
	uint64_t latsum;
	__le16 *ptr;
	__le32 *lptr;
	__le16 *mptr;

	phba = container_of(timer, struct lpfc_hba, cmf_stats_timer);
	/* Make sure we have a congestion info buffer */
	if (!phba->cgn_i)
		return HRTIMER_NORESTART;
	cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;

	phba->cgn_evt_timestamp = jiffies +
			msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
	phba->cgn_evt_minute++;

	/* We should get to this point in the routine on 1 minute intervals */
	lpfc_cgn_update_tstamp(phba, &cp->base_time);

	if (phba->cgn_fpin_frequency &&
	    phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
		value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
		cp->cgn_stat_npm = value;
	}

	/* Read and clear the latency counters for this minute */
	lvalue = atomic_read(&phba->cgn_latency_evt_cnt);
	latsum = atomic64_read(&phba->cgn_latency_evt);
	atomic_set(&phba->cgn_latency_evt_cnt, 0);
	atomic64_set(&phba->cgn_latency_evt, 0);

	/* We need to store MB/sec bandwidth in the congestion information.
	 * block_cnt is count of 512 byte blocks for the entire minute,
	 * bps will get bytes per sec before finally converting to MB/sec.
	 */
	bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512;
	phba->rx_block_cnt = 0;
	mvalue = bps / (1024 * 1024); /* convert to MB/sec */

	/* Every minute */
	/* cgn parameters */
	cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
	cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
	cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
	cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;

	/* Fill in default LUN qdepth */
	value = (uint16_t)(phba->pport->cfg_lun_queue_depth);
	cp->cgn_lunq = cpu_to_le16(value);

	/* Record congestion buffer info - every minute
	 * cgn_driver_evt_cnt (Driver events)
	 * cgn_fabric_warn_cnt (Congestion Warnings)
	 * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency)
	 * cgn_fabric_alarm_cnt (Congestion Alarms)
	 */
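	/* Advance the minute slot index, wrapping after LPFC_MIN_HOUR entries */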
	index = ++cp->cgn_index_minute;
	if (cp->cgn_index_minute == LPFC_MIN_HOUR) {
		cp->cgn_index_minute = 0;
		index = 0;
	}

	/* Get the number of driver events in this sample and reset counter */
	dvalue = atomic_read(&phba->cgn_driver_evt_cnt);
	atomic_set(&phba->cgn_driver_evt_cnt, 0);

	/* Get the number of warning events - FPIN and Signal for this minute */
	wvalue = 0;
	if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) ||
	    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
	    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
		wvalue = atomic_read(&phba->cgn_fabric_warn_cnt);
	atomic_set(&phba->cgn_fabric_warn_cnt, 0);

	/* Get the number of alarm events - FPIN and Signal for this minute */
	avalue = 0;
	if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) ||
	    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
		avalue = atomic_read(&phba->cgn_fabric_alarm_cnt);
	atomic_set(&phba->cgn_fabric_alarm_cnt, 0);

	/* Collect the driver, warning, alarm and latency counts for this
	 * minute into the driver congestion buffer.
	 */
	ptr = &cp->cgn_drvr_min[index];
	value = (uint16_t)dvalue;
	*ptr = cpu_to_le16(value);

	ptr = &cp->cgn_warn_min[index];
	value = (uint16_t)wvalue;
	*ptr = cpu_to_le16(value);

	ptr = &cp->cgn_alarm_min[index];
	value = (uint16_t)avalue;
	*ptr = cpu_to_le16(value);

	lptr = &cp->cgn_latency_min[index];
	if (lvalue) {
		lvalue = (uint32_t)div_u64(latsum, lvalue);
		*lptr = cpu_to_le32(lvalue);
	} else {
		*lptr = 0;
	}

	/* Collect the bandwidth value into the driver's congestion buffer. */
	mptr = &cp->cgn_bw_min[index];
	*mptr = cpu_to_le16(mvalue);

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"2418 Congestion Info - minute (%d): %d %d %d %d %d\n",
			index, dvalue, wvalue, *lptr, mvalue, avalue);

	/* Every hour */
	if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) {
		/* Record congestion buffer info - every hour
		 * Collapse all minutes into an hour
		 */
		index = ++cp->cgn_index_hour;
		if (cp->cgn_index_hour == LPFC_HOUR_DAY) {
			cp->cgn_index_hour = 0;
			index = 0;
		}

		dvalue = 0;
		wvalue = 0;
		lvalue = 0;
		avalue = 0;
		mvalue = 0;
		mbps = 0;
		for (i = 0; i < LPFC_MIN_HOUR; i++) {
			dvalue += le16_to_cpu(cp->cgn_drvr_min[i]);
			wvalue += le16_to_cpu(cp->cgn_warn_min[i]);
			lvalue += le32_to_cpu(cp->cgn_latency_min[i]);
			mbps += le16_to_cpu(cp->cgn_bw_min[i]);
			avalue += le16_to_cpu(cp->cgn_alarm_min[i]);
		}
		if (lvalue)		/* Avg of latency averages */
			lvalue /= LPFC_MIN_HOUR;
		if (mbps)		/* Avg of Bandwidth averages */
			mvalue = mbps / LPFC_MIN_HOUR;

		lptr = &cp->cgn_drvr_hr[index];
		*lptr = cpu_to_le32(dvalue);
		lptr = &cp->cgn_warn_hr[index];
		*lptr = cpu_to_le32(wvalue);
		lptr = &cp->cgn_latency_hr[index];
		*lptr = cpu_to_le32(lvalue);
		mptr = &cp->cgn_bw_hr[index];
		*mptr = cpu_to_le16(mvalue);
		lptr = &cp->cgn_alarm_hr[index];
		*lptr = cpu_to_le32(avalue);

		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"2419 Congestion Info - hour "
				"(%d): %d %d %d %d %d\n",
				index, dvalue, wvalue, lvalue, mvalue, avalue);
	}

	/* Every day */
	if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) {
		/* Record congestion buffer info - every day
		 * Collapse all hours into a day. Rotate days
		 * after LPFC_MAX_CGN_DAYS.
		 */
		index = ++cp->cgn_index_day;
		if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) {
			cp->cgn_index_day = 0;
			index = 0;
		}

		dvalue = 0;
		wvalue = 0;
		lvalue = 0;
		mvalue = 0;
		mbps = 0;
		avalue = 0;
		for (i = 0; i < LPFC_HOUR_DAY; i++) {
			dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
			wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
			lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
			mbps += le16_to_cpu(cp->cgn_bw_hr[i]);
			avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
		}
		if (lvalue)		/* Avg of latency averages */
			lvalue /= LPFC_HOUR_DAY;
		if (mbps)		/* Avg of Bandwidth averages */
			mvalue = mbps / LPFC_HOUR_DAY;

		lptr = &cp->cgn_drvr_day[index];
		*lptr = cpu_to_le32(dvalue);
		lptr = &cp->cgn_warn_day[index];
		*lptr = cpu_to_le32(wvalue);
		lptr = &cp->cgn_latency_day[index];
		*lptr = cpu_to_le32(lvalue);
		mptr = &cp->cgn_bw_day[index];
		*mptr = cpu_to_le16(mvalue);
		lptr = &cp->cgn_alarm_day[index];
		*lptr = cpu_to_le32(avalue);

		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"2420 Congestion Info - daily (%d): "
				"%d %d %d %d %d\n",
				index, dvalue, wvalue, lvalue, mvalue, avalue);
	}

	/* Use the frequency found in the last rcv'ed FPIN */
	value = phba->cgn_fpin_frequency;
	cp->cgn_warn_freq = cpu_to_le16(value);
	cp->cgn_alarm_freq = cpu_to_le16(value);

	lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
				     LPFC_CGN_CRC32_SEED);
	cp->cgn_info_crc = cpu_to_le32(lvalue);

	hrtimer_forward_now(timer, ktime_set(0, LPFC_SEC_MIN * NSEC_PER_SEC));

	return HRTIMER_RESTART;
}

/**
 * lpfc_calc_cmf_latency - latency from start of rx rate timer interval
 * @phba: The Hba for which this call is being executed.
 *
 * The routine calculates the latency from the beginning of the CMF timer
 * interval to the current point in time. It is called from IO completion
 * when we exceed our Bandwidth limitation for the time interval.
 */
uint32_t
lpfc_calc_cmf_latency(struct lpfc_hba *phba)
{
	struct timespec64 cmpl_time;
	uint32_t msec = 0;

	ktime_get_real_ts64(&cmpl_time);

	/* This routine works on a ms granularity so sec and usec are
	 * converted accordingly.
	 */
	if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) {
		msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) /
			NSEC_PER_MSEC;
	} else {
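		/* Seconds differ; borrow from tv_sec to handle nanosecond rollover */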
		if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) {
			msec = (cmpl_time.tv_sec -
				phba->cmf_latency.tv_sec) * MSEC_PER_SEC;
			msec += ((cmpl_time.tv_nsec -
				  phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC);
		} else {
			msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec -
				1) * MSEC_PER_SEC;
			msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) +
				 cmpl_time.tv_nsec) / NSEC_PER_MSEC);
		}
	}
	return msec;
}

/**
 * lpfc_cmf_timer - This is the timer function for one congestion
 * rate interval.
 * @timer: Pointer to the high resolution timer that expired
 */
static enum hrtimer_restart
lpfc_cmf_timer(struct hrtimer *timer)
{
	struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
					     cmf_timer);
	struct rx_info_entry entry;
	uint32_t io_cnt;
	uint32_t busy, max_read;
	uint64_t total, rcv, lat, mbpi, extra, cnt;
	int timer_interval = LPFC_CMF_INTERVAL;
	uint32_t ms;
	struct lpfc_cgn_stat *cgs;
	int cpu;

	/* Only restart the timer if congestion mgmt is on */
	if (phba->cmf_active_mode == LPFC_CFG_OFF ||
	    !phba->cmf_latency.tv_sec) {
		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"6224 CMF timer exit: %d %lld\n",
				phba->cmf_active_mode,
				(uint64_t)phba->cmf_latency.tv_sec);
		return HRTIMER_NORESTART;
	}

	/* If pport is not ready yet, just exit and wait for
	 * the next timer cycle to hit.
	 */
	if (!phba->pport)
		goto skip;

	/* Do not block SCSI IO while in the timer routine since
	 * total_bytes will be cleared
	 */
	atomic_set(&phba->cmf_stop_io, 1);

	/* First we need to calculate the actual ms between
	 * the last timer interrupt and this one. We ask for
	 * LPFC_CMF_INTERVAL, however the actual time may
	 * vary depending on system overhead.
	 */
	ms = lpfc_calc_cmf_latency(phba);

	/* Immediately after we calculate the time since the last
	 * timer interrupt, set the start time for the next
	 * interrupt
	 */
	ktime_get_real_ts64(&phba->cmf_latency);

	phba->cmf_link_byte_count =
		div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000);

	/* Collect all the stats from the prior timer interval */
	total = 0;
	io_cnt = 0;
	lat = 0;
	rcv = 0;
	for_each_present_cpu(cpu) {
		cgs = per_cpu_ptr(phba->cmf_stat, cpu);
		total += atomic64_xchg(&cgs->total_bytes, 0);
		io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0);
		lat += atomic64_xchg(&cgs->rx_latency, 0);
		rcv += atomic64_xchg(&cgs->rcv_bytes, 0);
	}

	/* Before we issue another CMF_SYNC_WQE, retrieve the BW
	 * returned from the last CMF_SYNC_WQE issued, from
	 * cmf_last_sync_bw. This will be the target BW for
	 * this next timer interval.
	 */
	if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
	    phba->link_state != LPFC_LINK_DOWN &&
	    phba->hba_flag & HBA_SETUP) {
		mbpi = phba->cmf_last_sync_bw;
		phba->cmf_last_sync_bw = 0;
		extra = 0;

		/* Calculate any extra bytes needed to account for the
		 * timer accuracy. If we are less than LPFC_CMF_INTERVAL
		 * calculate the adjustment needed for total to reflect
		 * a full LPFC_CMF_INTERVAL.
		 */
		if (ms && ms < LPFC_CMF_INTERVAL) {
			cnt = div_u64(total, ms); /* bytes per ms */
			cnt *= LPFC_CMF_INTERVAL; /* what total should be */
			extra = cnt - total;
		}
		lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra);
	} else {
		/* For Monitor mode or link down we want mbpi
		 * to be the full link speed
		 */
		mbpi = phba->cmf_link_byte_count;
		extra = 0;
	}
	phba->cmf_timer_cnt++;

	if (io_cnt) {
		/* Update congestion info buffer latency in us */
		atomic_add(io_cnt, &phba->cgn_latency_evt_cnt);
		atomic64_add(lat, &phba->cgn_latency_evt);
	}
	busy = atomic_xchg(&phba->cmf_busy, 0);
	max_read = atomic_xchg(&phba->rx_max_read_cnt, 0);

	/* Calculate MBPI for the next timer interval */
	if (mbpi) {
		if (mbpi > phba->cmf_link_byte_count ||
		    phba->cmf_active_mode == LPFC_CFG_MONITOR)
			mbpi = phba->cmf_link_byte_count;

		/* Change max_bytes_per_interval to what the prior
		 * CMF_SYNC_WQE cmpl indicated.
		 */
		if (mbpi != phba->cmf_max_bytes_per_interval)
			phba->cmf_max_bytes_per_interval = mbpi;
	}

	/* Save rxmonitor information for debug */
	if (phba->rx_monitor) {
		entry.total_bytes = total;
		entry.cmf_bytes = total + extra;
		entry.rcv_bytes = rcv;
		entry.cmf_busy = busy;
		entry.cmf_info = phba->cmf_active_info;
		if (io_cnt) {
			entry.avg_io_latency = div_u64(lat, io_cnt);
			entry.avg_io_size = div_u64(rcv, io_cnt);
		} else {
			entry.avg_io_latency = 0;
			entry.avg_io_size = 0;
		}
		entry.max_read_cnt = max_read;
		entry.io_cnt = io_cnt;
		entry.max_bytes_per_interval = mbpi;
		if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
			entry.timer_utilization = phba->cmf_last_ts;
		else
			entry.timer_utilization = ms;
		entry.timer_interval = ms;
		phba->cmf_last_ts = 0;

		lpfc_rx_monitor_record(phba->rx_monitor, &entry);
	}

	if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
		/* If Monitor mode, check if we are oversubscribed
		 * against the full line rate.
		 */
		if (mbpi && total > mbpi)
			atomic_inc(&phba->cgn_driver_evt_cnt);
	}
	phba->rx_block_cnt += div_u64(rcv, 512); /* save 512 byte block cnt */

	/* Since total_bytes has already been zeroed, it's okay to unblock
	 * after max_bytes_per_interval is setup.
	 */
	if (atomic_xchg(&phba->cmf_bw_wait, 0))
		queue_work(phba->wq, &phba->unblock_request_work);

	/* SCSI IO is now unblocked */
	atomic_set(&phba->cmf_stop_io, 0);

skip:
	hrtimer_forward_now(timer,
			    ktime_set(0, timer_interval * NSEC_PER_MSEC));
	return HRTIMER_RESTART;
}
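/*
 * Trunk logging helpers: each macro expands per trunk port index and
 * reports that port's state only when the port is configured in the
 * trunk, else "NA".
 */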
#define trunk_link_status(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
		"Link up" : "Link down") : "NA"
/* Did port __idx report an error */
#define trunk_port_fault(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"

static void
lpfc_update_trunk_link_status(struct lpfc_hba *phba,
			      struct lpfc_acqe_fc_la *acqe_fc)
{
	uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
	uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
	u8 cnt = 0;

	phba->sli4_hba.link_state.speed =
		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));

	phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
	/* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
	phba->fc_linkspeed =
		 lpfc_async_link_speed_to_read_top(
				phba,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));

	if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
		phba->trunk_link.link0.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
		cnt++;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
		phba->trunk_link.link1.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
		cnt++;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
		phba->trunk_link.link2.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
		cnt++;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
		phba->trunk_link.link3.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
		cnt++;
	}
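	/* Derive per-port physical speed in Gbps from the aggregate
	 * logical link speed reported in Mbps.
	 */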
	if (cnt)
		phba->trunk_link.phy_lnk_speed =
			phba->sli4_hba.link_state.logical_speed / (cnt * 1000);
	else
		phba->trunk_link.phy_lnk_speed = LPFC_LINK_SPEED_UNKNOWN;

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2910 Async FC Trunking Event - Speed:%d\n"
			"\tLogical speed:%d "
			"port0: %s port1: %s port2: %s port3: %s\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.logical_speed,
			trunk_link_status(0), trunk_link_status(1),
			trunk_link_status(2), trunk_link_status(3));

	if (phba->cmf_active_mode != LPFC_CFG_OFF)
		lpfc_cmf_signal_init(phba);

	if (port_fault)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3202 trunk error:0x%x (%s) seen on port0:%s "
				/*
				 * SLI-4: We have only 0xA error codes
				 * defined as of now. print an appropriate
				 * message in case driver needs to be updated.
				 */
				"port1:%s port2:%s port3:%s\n", err, err > 0xA ?
				"UNDEFINED. update driver." : trunk_errmsg[err],
				trunk_port_fault(0), trunk_port_fault(1),
				trunk_port_fault(2), trunk_port_fault(3));
}

/**
 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async fc completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
 * that the event was received and then issue a read_topology mailbox command so
 * that the rest of the driver will treat it the same as SLI3.
 **/
static void
lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
{
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	char *log_level;
	int rc;

	if (bf_get(lpfc_trailer_type, acqe_fc) !=
	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2895 Non FC link Event detected.(%d)\n",
				bf_get(lpfc_trailer_type, acqe_fc));
		return;
	}

	if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
	    LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
		lpfc_update_trunk_link_status(phba, acqe_fc);
		return;
	}

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
	phba->sli4_hba.link_state.topology =
				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_fc);
	phba->sli4_hba.link_state.link_status =
				bf_get(lpfc_acqe_fc_la_link_status, acqe_fc);

	/*
	 * Only select attention types need logical speed modification to what
	 * was previously set.
	 */
	if (phba->sli4_hba.link_state.status >= LPFC_FC_LA_TYPE_LINK_UP &&
	    phba->sli4_hba.link_state.status < LPFC_FC_LA_TYPE_ACTIVATE_FAIL) {
		if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
		    LPFC_FC_LA_TYPE_LINK_DOWN)
			phba->sli4_hba.link_state.logical_speed = 0;
		else if (!phba->sli4_hba.conf_trunk)
			phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
			"%dMbps Fault:x%x Link Status:x%x\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault,
			phba->sli4_hba.link_state.link_status);

	/*
	 * The following attention types are informational only, providing
	 * further details about link status. Overwrite the value of
	 * link_state.status appropriately. No further action is required.
	 */
	if (phba->sli4_hba.link_state.status >= LPFC_FC_LA_TYPE_ACTIVATE_FAIL) {
		switch (phba->sli4_hba.link_state.status) {
		case LPFC_FC_LA_TYPE_ACTIVATE_FAIL:
			log_level = KERN_WARNING;
			phba->sli4_hba.link_state.status =
					LPFC_FC_LA_TYPE_LINK_DOWN;
			break;
		case LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT:
			/*
			 * During bb credit recovery establishment, receiving
			 * this attention type is normal. Link Up attention
			 * type is expected to occur before this informational
			 * attention type so keep the Link Up status.
			 */
			log_level = KERN_INFO;
			phba->sli4_hba.link_state.status =
					LPFC_FC_LA_TYPE_LINK_UP;
			break;
		default:
			log_level = KERN_INFO;
			break;
		}
		lpfc_log_msg(phba, log_level, LOG_SLI,
			     "2992 Async FC event - Informational Link "
			     "Attention Type x%x\n",
			     bf_get(lpfc_acqe_fc_la_att_type, acqe_fc));
		return;
	}

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2897 The mboxq allocation failed\n");
		return;
	}
	rc = lpfc_mbox_rsrc_prep(phba, pmb);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2898 The mboxq prep failed\n");
		goto out_free_pmb;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have processed the link event */
	phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
		phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);

		switch (phba->sli4_hba.link_state.status) {
		case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
			phba->link_flag |= LS_MDS_LINK_DOWN;
			break;
		case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
			phba->link_flag |= LS_MDS_LOOPBACK;
			break;
		default:
			break;
		}

		/* Initialize completion status */
		mb = &pmb->u.mb;
		mb->mbxStatus = MBX_SUCCESS;

		/* Parse port fault information field */
		lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);

		/* Parse and translate link attention fields */
		la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
		la->eventTag = acqe_fc->event_tag;

		if (phba->sli4_hba.link_state.status ==
		    LPFC_FC_LA_TYPE_UNEXP_WWPN) {
			bf_set(lpfc_mbx_read_top_att_type, la,
			       LPFC_FC_LA_TYPE_UNEXP_WWPN);
		} else {
			bf_set(lpfc_mbx_read_top_att_type, la,
			       LPFC_FC_LA_TYPE_LINK_DOWN);
		}
		/* Invoke the mailbox command callback function */
		lpfc_mbx_cmpl_read_topology(phba, pmb);

		return;
	}

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		goto out_free_pmb;
	return;

out_free_pmb:
	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}

/**
 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_sli: pointer to the async SLI completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous SLI events.
 **/
static void
lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
{
	char port_name;
	char message[128];
	uint8_t status;
	uint8_t evt_type;
	uint8_t operational = 0;
	struct temp_event temp_event_data;
	struct lpfc_acqe_misconfigured_event *misconfigured;
	struct lpfc_acqe_cgn_signal *cgn_signal;
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int rc, i, cnt;

	evt_type = bf_get(lpfc_trailer_type, acqe_sli);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2901 Async SLI event - Type:%d, Event Data: x%08x "
			"x%08x x%08x x%08x\n", evt_type,
			acqe_sli->event_data1, acqe_sli->event_data2,
			acqe_sli->event_data3, acqe_sli->trailer);

	port_name = phba->Port[0];
	if (port_name == 0x00)
		port_name = '?'; /* port name is not yet populated */

	switch (evt_type) {
	case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3190 Over Temperature:%d Celsius- Port Name %c\n",
				acqe_sli->event_data1, port_name);

		phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_NORMAL_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_LDS_EVENT,
				"3191 Normal Temperature:%d Celsius - Port Name %c\n",
				acqe_sli->event_data1, port_name);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
		misconfigured = (struct lpfc_acqe_misconfigured_event *)
					&acqe_sli->event_data1;

		/* fetch the status for this port */
		switch (phba->sli4_hba.lnk_info.lnk_no) {
		case LPFC_LINK_NUMBER_0:
			status = bf_get(lpfc_sli_misconfigured_port0_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port0_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_1:
			status = bf_get(lpfc_sli_misconfigured_port1_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port1_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_2:
			status = bf_get(lpfc_sli_misconfigured_port2_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port2_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_3:
			status = bf_get(lpfc_sli_misconfigured_port3_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port3_op,
					&misconfigured->theEvent);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3296 "
					"LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
					"event: Invalid link %d",
					phba->sli4_hba.lnk_info.lnk_no);
			return;
		}

		/* Skip if optic state unchanged */
		if (phba->sli4_hba.lnk_info.optic_state == status)
			return;

		switch (status) {
		case LPFC_SLI_EVENT_STATUS_VALID:
			sprintf(message, "Physical Link is functional");
			break;
		case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
			sprintf(message, "Optics faulted/incorrectly "
				"installed/not installed - Reseat optics, "
				"if issue not resolved, replace.");
			break;
		case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
			sprintf(message,
				"Optics of two types installed - Remove one "
				"optic or install matching pair of optics.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
			sprintf(message, "Incompatible optics - Replace with "
				"compatible optics for card to function.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
			sprintf(message, "Unqualified optics - Replace with "
				"Avago optics for Warranty and Technical "
				"Support - Link is%s operational",
				(operational) ? " not" : "");
			break;
		case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
			sprintf(message, "Uncertified optics - Replace with "
				"Avago-certified optics to enable link "
				"operation - Link is%s operational",
				(operational) ? " not" : "");
			break;
		default:
			/* firmware is reporting a status we don't know about */
			sprintf(message, "Unknown event status x%02x", status);
			break;
		}

		/* Issue READ_CONFIG mbox command to refresh supported speeds */
		rc = lpfc_sli4_read_config(phba);
		if (rc) {
			phba->lmt = 0;
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"3194 Unable to retrieve supported "
					"speeds, rc = 0x%x\n", rc);
		}
		rc = lpfc_sli4_refresh_params(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3174 Unable to update pls support, "
					"rc x%x\n", rc);
		}
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				shost = lpfc_shost_from_vport(vports[i]);
				lpfc_host_supported_speeds_set(shost);
			}
		}
		lpfc_destroy_vport_work_array(phba, vports);

		phba->sli4_hba.lnk_info.optic_state = status;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3176 Port Name %c %s\n", port_name, message);
		break;
	case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3192 Remote DPort Test Initiated - "
				"Event Data1:x%08x Event Data2: x%08x\n",
				acqe_sli->event_data1, acqe_sli->event_data2);
		break;
	case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG:
		/* Call FW to obtain active parms */
		lpfc_sli4_cgn_parm_chg_evt(phba);
		break;
	case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
		/* Misconfigured WWN. Reports that the SLI Port is configured
		 * to use FA-WWN, but the attached device doesn't support it.
		 * Event Data1 - N.A, Event Data2 - N.A
		 * This event only happens on the physical port.
		 */
		lpfc_log_msg(phba, KERN_WARNING, LOG_SLI | LOG_DISCOVERY,
			     "2699 Misconfigured FA-PWWN - Attached device "
			     "does not support FA-PWWN\n");
		phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_FABRIC;
		memset(phba->pport->fc_portname.u.wwn, 0,
		       sizeof(struct lpfc_name));
		break;
	case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
		/* EEPROM failure. No driver action is required */
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2518 EEPROM failure - "
				"Event Data1: x%08x Event Data2: x%08x\n",
				acqe_sli->event_data1, acqe_sli->event_data2);
		break;
	case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL:
		if (phba->cmf_active_mode == LPFC_CFG_OFF)
			break;
		cgn_signal = (struct lpfc_acqe_cgn_signal *)
					&acqe_sli->event_data1;
		phba->cgn_acqe_cnt++;

		cnt = bf_get(lpfc_warn_acqe, cgn_signal);
		atomic64_add(cnt, &phba->cgn_acqe_stat.warn);
		atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm);

		/* no threshold for CMF, even 1 signal will trigger an event */

		/* Alarm overrides warning, so check that first */
		if (cgn_signal->alarm_cnt) {
			if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
				/* Keep track of alarm cnt for CMF_SYNC_WQE */
				atomic_add(cgn_signal->alarm_cnt,
					   &phba->cgn_sync_alarm_cnt);
			}
		} else if (cnt) {
			/* signal action needs to be taken */
			if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
			    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
				/* Keep track of warning cnt for CMF_SYNC_WQE */
				atomic_add(cnt, &phba->cgn_sync_warn_cnt);
			}
		}
		break;
	case LPFC_SLI_EVENT_TYPE_RD_SIGNAL:
		/* May be accompanied by a temperature event */
		lpfc_printf_log(phba, KERN_INFO,
				LOG_SLI | LOG_LINK_EVENT | LOG_LDS_EVENT,
				"2902 Remote Degrade Signaling: x%08x x%08x "
				"x%08x\n",
				acqe_sli->event_data1, acqe_sli->event_data2,
				acqe_sli->event_data3);
		break;
	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3193 Unrecognized SLI event, type: 0x%x",
				evt_type);
		break;
	}
}

/**
 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
 * @vport: pointer to vport data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on a vport in
 * response to a CVL event.
 *
 * Return the pointer to the ndlp with the vport if successful, otherwise
 * return NULL.
 **/
static struct lpfc_nodelist *
lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;

	if (!vport)
		return NULL;
	phba = vport->phba;
	if (!phba)
		return NULL;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return NULL;
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	}
	if ((phba->pport->port_state < LPFC_FLOGI) &&
	    (phba->pport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	/* If virtual link is not yet instantiated ignore CVL */
	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
	    && (vport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	shost = lpfc_shost_from_vport(vport);
	if (!shost)
		return NULL;
	lpfc_linkdown_port(vport);
	lpfc_cleanup_pending_mbox(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_VPORT_CVL_RCVD;
	spin_unlock_irq(shost->host_lock);

	return ndlp;
}

/**
 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on all vports in
 * response to a FCF dead event.
 **/
static void
lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_sli4_perform_vport_cvl(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
}
6718
6719 /**
6720 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
6721 * @phba: pointer to lpfc hba data structure.
6722 * @acqe_fip: pointer to the async fcoe completion queue entry.
6723 *
6724 * This routine is to handle the SLI4 asynchronous fcoe event.
6725 **/
6726 static void
lpfc_sli4_async_fip_evt(struct lpfc_hba * phba,struct lpfc_acqe_fip * acqe_fip)6727 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
6728 struct lpfc_acqe_fip *acqe_fip)
6729 {
6730 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
6731 int rc;
6732 struct lpfc_vport *vport;
6733 struct lpfc_nodelist *ndlp;
6734 int active_vlink_present;
6735 struct lpfc_vport **vports;
6736 int i;
6737
6738 phba->fc_eventTag = acqe_fip->event_tag;
6739 phba->fcoe_eventtag = acqe_fip->event_tag;
6740 switch (event_type) {
6741 case LPFC_FIP_EVENT_TYPE_NEW_FCF:
6742 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
6743 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
6744 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6745 "2546 New FCF event, evt_tag:x%x, "
6746 "index:x%x\n",
6747 acqe_fip->event_tag,
6748 acqe_fip->index);
6749 else
6750 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
6751 LOG_DISCOVERY,
6752 "2788 FCF param modified event, "
6753 "evt_tag:x%x, index:x%x\n",
6754 acqe_fip->event_tag,
6755 acqe_fip->index);
6756 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6757 /*
6758 * During period of FCF discovery, read the FCF
6759 * table record indexed by the event to update
6760 * FCF roundrobin failover eligible FCF bmask.
6761 */
6762 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6763 LOG_DISCOVERY,
6764 "2779 Read FCF (x%x) for updating "
6765 "roundrobin FCF failover bmask\n",
6766 acqe_fip->index);
6767 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
6768 }
6769
6770 /* If the FCF discovery is in progress, do nothing. */
6771 spin_lock_irq(&phba->hbalock);
6772 if (phba->hba_flag & FCF_TS_INPROG) {
6773 spin_unlock_irq(&phba->hbalock);
6774 break;
6775 }
6776 /* If fast FCF failover rescan event is pending, do nothing */
6777 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
6778 spin_unlock_irq(&phba->hbalock);
6779 break;
6780 }
6781
6782 /* If the FCF has been in discovered state, do nothing. */
6783 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
6784 spin_unlock_irq(&phba->hbalock);
6785 break;
6786 }
6787 spin_unlock_irq(&phba->hbalock);
6788
6789 /* Otherwise, scan the entire FCF table and re-discover SAN */
6790 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6791 "2770 Start FCF table scan per async FCF "
6792 "event, evt_tag:x%x, index:x%x\n",
6793 acqe_fip->event_tag, acqe_fip->index);
6794 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
6795 LPFC_FCOE_FCF_GET_FIRST);
6796 if (rc)
6797 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6798 "2547 Issue FCF scan read FCF mailbox "
6799 "command failed (x%x)\n", rc);
6800 break;
6801
6802 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
6803 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6804 "2548 FCF Table full count 0x%x tag 0x%x\n",
6805 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
6806 acqe_fip->event_tag);
6807 break;
6808
6809 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
6810 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6811 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6812 "2549 FCF (x%x) disconnected from network, "
6813 "tag:x%x\n", acqe_fip->index,
6814 acqe_fip->event_tag);
6815 /*
6816 * If we are in the middle of FCF failover process, clear
6817 * the corresponding FCF bit in the roundrobin bitmap.
6818 */
6819 spin_lock_irq(&phba->hbalock);
6820 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
6821 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
6822 spin_unlock_irq(&phba->hbalock);
6823 /* Update FLOGI FCF failover eligible FCF bmask */
6824 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
6825 break;
6826 }
6827 spin_unlock_irq(&phba->hbalock);
6828
6829 /* If the event is not for the currently used FCF, do nothing */
6830 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
6831 break;
6832
6833 /*
6834 * Otherwise, request the port to rediscover the entire FCF
6835 * table for a fast recovery in case the current FCF is no
6836 * longer valid, since we are not already in the middle of
6837 * an FCF failover process.
6838 */
6839 spin_lock_irq(&phba->hbalock);
6840 /* Mark the fast failover process in progress */
6841 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
6842 spin_unlock_irq(&phba->hbalock);
6843
6844 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6845 "2771 Start FCF fast failover process due to "
6846 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
6847 "\n", acqe_fip->event_tag, acqe_fip->index);
6848 rc = lpfc_sli4_redisc_fcf_table(phba);
6849 if (rc) {
6850 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6851 LOG_TRACE_EVENT,
6852 "2772 Issue FCF rediscover mailbox "
6853 "command failed, fail through to FCF "
6854 "dead event\n");
6855 spin_lock_irq(&phba->hbalock);
6856 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
6857 spin_unlock_irq(&phba->hbalock);
6858 /*
6859 * Last resort will fail over by treating this
6860 * as a link down to FCF registration.
6861 */
6862 lpfc_sli4_fcf_dead_failthrough(phba);
6863 } else {
6864 /* Reset FCF roundrobin bmask for new discovery */
6865 lpfc_sli4_clear_fcf_rr_bmask(phba);
6866 /*
6867 * Handling fast FCF failover to a DEAD FCF event is
6868 * considered equivalent to receiving CVL to all vports.
6869 */
6870 lpfc_sli4_perform_all_vport_cvl(phba);
6871 }
6872 break;
6873 case LPFC_FIP_EVENT_TYPE_CVL:
6874 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6875 lpfc_printf_log(phba, KERN_ERR,
6876 LOG_TRACE_EVENT,
6877 "2718 Clear Virtual Link Received for VPI 0x%x"
6878 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
6879
6880 vport = lpfc_find_vport_by_vpid(phba,
6881 acqe_fip->index);
6882 ndlp = lpfc_sli4_perform_vport_cvl(vport);
6883 if (!ndlp)
6884 break;
6885 active_vlink_present = 0;
6886
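/* Check whether any other vport still has an active virtual link */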
6887 vports = lpfc_create_vport_work_array(phba);
6888 if (vports) {
6889 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6890 i++) {
6891 if ((!(vports[i]->fc_flag &
6892 FC_VPORT_CVL_RCVD)) &&
6893 (vports[i]->port_state > LPFC_FDISC)) {
6894 active_vlink_present = 1;
6895 break;
6896 }
6897 }
6898 lpfc_destroy_vport_work_array(phba, vports);
6899 }
6900
6901 /*
6902 * Don't re-instantiate if vport is marked for deletion.
6903 * If we are here first then vport_delete is going to wait
6904 * for discovery to complete.
6905 */
6906 if (!(vport->load_flag & FC_UNLOADING) &&
6907 active_vlink_present) {
6908 /*
6909 * If there are other active VLinks present,
6910 * re-instantiate the Vlink using FDISC.
6911 */
6912 mod_timer(&ndlp->nlp_delayfunc,
6913 jiffies + msecs_to_jiffies(1000));
6914 spin_lock_irq(&ndlp->lock);
6915 ndlp->nlp_flag |= NLP_DELAY_TMO;
6916 spin_unlock_irq(&ndlp->lock);
6917 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
6918 vport->port_state = LPFC_FDISC;
6919 } else {
6920 /*
6921 * Otherwise, request the port to rediscover
6922 * the entire FCF table for a fast recovery,
6923 * in case the current FCF is no longer
6924 * valid, if we are not already in the FCF
6925 * failover process.
6926 */
6927 spin_lock_irq(&phba->hbalock);
6928 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6929 spin_unlock_irq(&phba->hbalock);
6930 break;
6931 }
6932 /* Mark the fast failover process in progress */
6933 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
6934 spin_unlock_irq(&phba->hbalock);
6935 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6936 LOG_DISCOVERY,
6937 "2773 Start FCF failover per CVL, "
6938 "evt_tag:x%x\n", acqe_fip->event_tag);
6939 rc = lpfc_sli4_redisc_fcf_table(phba);
6940 if (rc) {
6941 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6942 LOG_TRACE_EVENT,
6943 "2774 Issue FCF rediscover "
6944 "mailbox command failed, "
6945 "through to CVL event\n");
6946 spin_lock_irq(&phba->hbalock);
6947 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
6948 spin_unlock_irq(&phba->hbalock);
6949 /*
6950 * Last resort will be to retry on the
6951 * currently registered FCF entry.
6952 */
6953 lpfc_retry_pport_discovery(phba);
6954 } else
6955 /*
6956 * Reset FCF roundrobin bmask for new
6957 * discovery.
6958 */
6959 lpfc_sli4_clear_fcf_rr_bmask(phba);
6960 }
6961 break;
6962 default:
6963 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6964 "0288 Unknown FCoE event type 0x%x event tag "
6965 "0x%x\n", event_type, acqe_fip->event_tag);
6966 break;
6967 }
6968 }
6969
6970 /**
6971 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
6972 * @phba: pointer to lpfc hba data structure.
6973 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
6974 *
6975 * This routine is to handle the SLI4 asynchronous dcbx event.
6976 **/
6977 static void
6978 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
6979 struct lpfc_acqe_dcbx *acqe_dcbx)
6980 {
6981 phba->fc_eventTag = acqe_dcbx->event_tag;
6982 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6983 "0290 The SLI4 DCBX asynchronous event is not "
6984 "handled yet\n");
6985 }
6986
6987 /**
6988 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
6989 * @phba: pointer to lpfc hba data structure.
6990 * @acqe_grp5: pointer to the async grp5 completion queue entry.
6991 *
6992 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
6993 * is an asynchronous notification of a logical link speed change. The Port
6994 * reports the logical link speed in units of 10Mbps.
6995 **/
6996 static void
6997 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
6998 struct lpfc_acqe_grp5 *acqe_grp5)
6999 {
7000 uint16_t prev_ll_spd;
7001
7002 phba->fc_eventTag = acqe_grp5->event_tag;
7003 phba->fcoe_eventtag = acqe_grp5->event_tag;
7004 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
7005 phba->sli4_hba.link_state.logical_speed =
7006 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
7007 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7008 "2789 GRP5 Async Event: Updating logical link speed "
7009 "from %dMbps to %dMbps\n", prev_ll_spd,
7010 phba->sli4_hba.link_state.logical_speed);
7011 }
7012
7013 /**
7014 * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event
7015 * @phba: pointer to lpfc hba data structure.
7016 *
7017 * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event
7018 * is an asynchronous notification of a request to reset CM stats.
7019 **/
7020 static void
7021 lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba)
7022 {
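/* Nothing to do if the congestion info buffer was never allocated */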
7023 if (!phba->cgn_i)
7024 return;
7025 lpfc_init_congestion_stat(phba);
7026 }
7027
7028 /**
7029 * lpfc_cgn_params_val - Validate FW congestion parameters.
7030 * @phba: pointer to lpfc hba data structure.
7031 * @p_cfg_param: pointer to FW provided congestion parameters.
7032 *
7033 * This routine validates the congestion parameters passed
7034 * by the FW to the driver via an ACQE event.
7035 **/
7036 static void
7037 lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param)
7038 {
7039 spin_lock_irq(&phba->hbalock);
7040
7041 if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF,
7042 LPFC_CFG_MONITOR)) {
7043 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
7044 "6225 CMF mode param out of range: %d\n",
7045 p_cfg_param->cgn_param_mode);
7046 p_cfg_param->cgn_param_mode = LPFC_CFG_OFF;
7047 }
7048
7049 spin_unlock_irq(&phba->hbalock);
7050 }
7051
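/* Human-readable CMF mode names, indexed by the LPFC_CFG_OFF,
 * LPFC_CFG_MANAGED and LPFC_CFG_MONITOR mode values.
 */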
7052 static const char * const lpfc_cmf_mode_to_str[] = {
7053 "OFF",
7054 "MANAGED",
7055 "MONITOR",
7056 };
7057
7058 /**
7059 * lpfc_cgn_params_parse - Process a FW cong parm change event
7060 * @phba: pointer to lpfc hba data structure.
7061 * @p_cgn_param: pointer to a data buffer with the FW cong params.
7062 * @len: the size of pdata in bytes.
7063 *
7064 * This routine validates the congestion management buffer signature
7065 * from the FW, validates the contents, and corrects any values
7066 * that are out of range. If the signature magic is correct and
7067 * after parameter validation, the contents are copied to the driver's
7068 * @phba structure. If the magic is incorrect, an error message is
7069 * logged.
7070 **/
7071 static void
7072 lpfc_cgn_params_parse(struct lpfc_hba *phba,
7073 struct lpfc_cgn_param *p_cgn_param, uint32_t len)
7074 {
7075 struct lpfc_cgn_info *cp;
7076 uint32_t crc, oldmode;
7077 char acr_string[4] = {0};
7078
7079 /* Make sure the FW has encoded the correct magic number to
7080 * validate the congestion parameter in FW memory.
7081 */
7082 if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) {
7083 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7084 "4668 FW cgn parm buffer data: "
7085 "magic 0x%x version %d mode %d "
7086 "level0 %d level1 %d "
7087 "level2 %d byte13 %d "
7088 "byte14 %d byte15 %d "
7089 "byte11 %d byte12 %d activeMode %d\n",
7090 p_cgn_param->cgn_param_magic,
7091 p_cgn_param->cgn_param_version,
7092 p_cgn_param->cgn_param_mode,
7093 p_cgn_param->cgn_param_level0,
7094 p_cgn_param->cgn_param_level1,
7095 p_cgn_param->cgn_param_level2,
7096 p_cgn_param->byte13,
7097 p_cgn_param->byte14,
7098 p_cgn_param->byte15,
7099 p_cgn_param->byte11,
7100 p_cgn_param->byte12,
7101 phba->cmf_active_mode);
7102
7103 oldmode = phba->cmf_active_mode;
7104
7105 /* Any parameters out of range are corrected to defaults
7106 * by this routine. No need to fail.
7107 */
7108 lpfc_cgn_params_val(phba, p_cgn_param);
7109
7110 /* Parameters are verified, move them into driver storage */
7111 spin_lock_irq(&phba->hbalock);
7112 memcpy(&phba->cgn_p, p_cgn_param,
7113 sizeof(struct lpfc_cgn_param));
7114
7115 /* Update parameters in congestion info buffer now */
7116 if (phba->cgn_i) {
7117 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
7118 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
7119 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
7120 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
7121 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
7122 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
7123 LPFC_CGN_CRC32_SEED);
7124 cp->cgn_info_crc = cpu_to_le32(crc);
7125 }
7126 spin_unlock_irq(&phba->hbalock);
7127
7128 phba->cmf_active_mode = phba->cgn_p.cgn_param_mode;
7129
7130 switch (oldmode) {
7131 case LPFC_CFG_OFF:
7132 if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) {
7133 /* Turning CMF on */
7134 lpfc_cmf_start(phba);
7135
7136 if (phba->link_state >= LPFC_LINK_UP) {
7137 phba->cgn_reg_fpin =
7138 phba->cgn_init_reg_fpin;
7139 phba->cgn_reg_signal =
7140 phba->cgn_init_reg_signal;
7141 lpfc_issue_els_edc(phba->pport, 0);
7142 }
7143 }
7144 break;
7145 case LPFC_CFG_MANAGED:
7146 switch (phba->cgn_p.cgn_param_mode) {
7147 case LPFC_CFG_OFF:
7148 /* Turning CMF off */
7149 lpfc_cmf_stop(phba);
7150 if (phba->link_state >= LPFC_LINK_UP)
7151 lpfc_issue_els_edc(phba->pport, 0);
7152 break;
7153 case LPFC_CFG_MONITOR:
7154 phba->cmf_max_bytes_per_interval =
7155 phba->cmf_link_byte_count;
7156
7157 /* Resume blocked IO - unblock on workqueue */
7158 queue_work(phba->wq,
7159 &phba->unblock_request_work);
7160 break;
7161 }
7162 break;
7163 case LPFC_CFG_MONITOR:
7164 switch (phba->cgn_p.cgn_param_mode) {
7165 case LPFC_CFG_OFF:
7166 /* Turning CMF off */
7167 lpfc_cmf_stop(phba);
7168 if (phba->link_state >= LPFC_LINK_UP)
7169 lpfc_issue_els_edc(phba->pport, 0);
7170 break;
7171 case LPFC_CFG_MANAGED:
7172 lpfc_cmf_signal_init(phba);
7173 break;
7174 }
7175 break;
7176 }
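/* Log the resulting CMF mode unless CMF was off and remains off */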
7177 if (oldmode != LPFC_CFG_OFF ||
7178 oldmode != phba->cgn_p.cgn_param_mode) {
7179 if (phba->cgn_p.cgn_param_mode == LPFC_CFG_MANAGED)
7180 scnprintf(acr_string, sizeof(acr_string), "%u",
7181 phba->cgn_p.cgn_param_level0);
7182 else
7183 scnprintf(acr_string, sizeof(acr_string), "NA");
7184
7185 dev_info(&phba->pcidev->dev, "%d: "
7186 "4663 CMF: Mode %s acr %s\n",
7187 phba->brd_no,
7188 lpfc_cmf_mode_to_str
7189 [phba->cgn_p.cgn_param_mode],
7190 acr_string);
7191 }
7192 } else {
7193 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7194 "4669 FW cgn parm buf wrong magic 0x%x "
7195 "version %d\n", p_cgn_param->cgn_param_magic,
7196 p_cgn_param->cgn_param_version);
7197 }
7198 }
7199
7200 /**
7201 * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters.
7202 * @phba: pointer to lpfc hba data structure.
7203 *
7204 * This routine issues a read_object mailbox command to
7205 * get the congestion management parameters from the FW,
7206 * parses them and updates the driver-maintained values.
7207 *
7208 * Returns
7209 * 0 if the object was empty
7210 * -Eval if an error was encountered
7211 * Count if bytes were read from object
7212 **/
7213 int
7214 lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
7215 {
7216 int ret = 0;
7217 struct lpfc_cgn_param *p_cgn_param = NULL;
7218 u32 *pdata = NULL;
7219 u32 len = 0;
7220
7221 /* Find out if the FW has a new set of congestion parameters. */
7222 len = sizeof(struct lpfc_cgn_param);
7223 pdata = kzalloc(len, GFP_KERNEL);
7224 if (!pdata)
7225 return -ENOMEM;
7226 ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
7227 pdata, len);
7228
7229 /* 0 means no data. A negative means error. A positive means
7230 * bytes were copied.
7231 */
7232 if (!ret) {
7233 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7234 "4670 CGN RD OBJ returns no data\n");
7235 goto rd_obj_err;
7236 } else if (ret < 0) {
7237 /* Some error. Just exit and return it to the caller.*/
7238 goto rd_obj_err;
7239 }
7240
7241 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7242 "6234 READ CGN PARAMS Successful %d\n", len);
7243
7244 /* Parse data pointer over len and update the phba congestion
7245 * parameters with values passed back. The receive rate values
7246 * may have been altered in FW, but take no action here.
7247 */
7248 p_cgn_param = (struct lpfc_cgn_param *)pdata;
7249 lpfc_cgn_params_parse(phba, p_cgn_param, len);
7250
7251 rd_obj_err:
7252 kfree(pdata);
7253 return ret;
7254 }
7255
7256 /**
7257 * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event
7258 * @phba: pointer to lpfc hba data structure.
7259 *
7260 * The FW generated Async ACQE SLI event calls this routine when
7261 * the event type is an SLI Internal Port Event and the Event Code
7262 * indicates a change to the FW maintained congestion parameters.
7263 *
7264 * This routine executes a Read_Object mailbox call to obtain the
7265 * current congestion parameters maintained in FW and corrects
7266 * the driver's active congestion parameters.
7267 *
7268 * The acqe event is not passed because there is no further data
7269 * required.
7270 *
7271 * Returns nonzero error if event processing encountered an error.
7272 * Zero otherwise for success.
7273 **/
7274 static int
7275 lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba)
7276 {
7277 int ret = 0;
7278
7279 if (!phba->sli4_hba.pc_sli4_params.cmf) {
7280 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7281 "4664 Cgn Evt when E2E off. Drop event\n");
7282 return -EACCES;
7283 }
7284
7285 /* If the event is claiming an empty object, it's ok. A write
7286 * could have cleared it. Only error is a negative return
7287 * status.
7288 */
7289 ret = lpfc_sli4_cgn_params_read(phba);
7290 if (ret < 0) {
7291 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7292 "4667 Error reading Cgn Params (%d)\n",
7293 ret);
7294 } else if (!ret) {
7295 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7296 "4673 CGN Event empty object.\n");
7297 }
7298 return ret;
7299 }
7300
7301 /**
7302 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
7303 * @phba: pointer to lpfc hba data structure.
7304 *
7305 * This routine is invoked by the worker thread to process all the pending
7306 * SLI4 asynchronous events.
7307 **/
7308 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
7309 {
7310 struct lpfc_cq_event *cq_event;
7311 unsigned long iflags;
7312
7313 /* First, declare the async event has been handled */
7314 spin_lock_irqsave(&phba->hbalock, iflags);
7315 phba->hba_flag &= ~ASYNC_EVENT;
7316 spin_unlock_irqrestore(&phba->hbalock, iflags);
7317
7318 /* Now, handle all the async events */
7319 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7320 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
7321 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
7322 cq_event, struct lpfc_cq_event, list);
7323 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
7324 iflags);
7325
7326 /* Process the asynchronous event */
7327 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
7328 case LPFC_TRAILER_CODE_LINK:
7329 lpfc_sli4_async_link_evt(phba,
7330 &cq_event->cqe.acqe_link);
7331 break;
7332 case LPFC_TRAILER_CODE_FCOE:
7333 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
7334 break;
7335 case LPFC_TRAILER_CODE_DCBX:
7336 lpfc_sli4_async_dcbx_evt(phba,
7337 &cq_event->cqe.acqe_dcbx);
7338 break;
7339 case LPFC_TRAILER_CODE_GRP5:
7340 lpfc_sli4_async_grp5_evt(phba,
7341 &cq_event->cqe.acqe_grp5);
7342 break;
7343 case LPFC_TRAILER_CODE_FC:
7344 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
7345 break;
7346 case LPFC_TRAILER_CODE_SLI:
7347 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
7348 break;
7349 case LPFC_TRAILER_CODE_CMSTAT:
7350 lpfc_sli4_async_cmstat_evt(phba);
7351 break;
7352 default:
7353 lpfc_printf_log(phba, KERN_ERR,
7354 LOG_TRACE_EVENT,
7355 "1804 Invalid asynchronous event code: "
7356 "x%x\n", bf_get(lpfc_trailer_code,
7357 &cq_event->cqe.mcqe_cmpl));
7358 break;
7359 }
7360
7361 /* Free the completion event processed to the free pool */
7362 lpfc_sli4_cq_event_release(phba, cq_event);
7363 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7364 }
7365 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
7366 }
7367
7368 /**
7369 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
7370 * @phba: pointer to lpfc hba data structure.
7371 *
7372 * This routine is invoked by the worker thread to process FCF table
7373 * rediscovery pending completion event.
7374 **/
7375 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
7376 {
7377 int rc;
7378
7379 spin_lock_irq(&phba->hbalock);
7380 /* Clear FCF rediscovery timeout event */
7381 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
7382 /* Clear driver fast failover FCF record flag */
7383 phba->fcf.failover_rec.flag = 0;
7384 /* Set state for FCF fast failover */
7385 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
7386 spin_unlock_irq(&phba->hbalock);
7387
7388 /* Scan FCF table from the first entry to re-discover SAN */
7389 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
7390 "2777 Start post-quiescent FCF table scan\n");
7391 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
7392 if (rc)
7393 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7394 "2747 Issue FCF scan read FCF mailbox "
7395 "command failed 0x%x\n", rc);
7396 }
7397
7398 /**
7399 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
7400 * @phba: pointer to lpfc hba data structure.
7401 * @dev_grp: The HBA PCI-Device group number.
7402 *
7403 * This routine is invoked to set up the per HBA PCI-Device group function
7404 * API jump table entries.
7405 *
7406 * Return: 0 if success, otherwise -ENODEV
7407 **/
7408 int
7409 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7410 {
7411 int rc;
7412
7413 /* Set up lpfc PCI-device group */
7414 phba->pci_dev_grp = dev_grp;
7415
7416 /* The LPFC_PCI_DEV_OC uses SLI4 */
7417 if (dev_grp == LPFC_PCI_DEV_OC)
7418 phba->sli_rev = LPFC_SLI_REV4;
7419
7420 /* Set up device INIT API function jump table */
7421 rc = lpfc_init_api_table_setup(phba, dev_grp);
7422 if (rc)
7423 return -ENODEV;
7424 /* Set up SCSI API function jump table */
7425 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
7426 if (rc)
7427 return -ENODEV;
7428 /* Set up SLI API function jump table */
7429 rc = lpfc_sli_api_table_setup(phba, dev_grp);
7430 if (rc)
7431 return -ENODEV;
7432 /* Set up MBOX API function jump table */
7433 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
7434 if (rc)
7435 return -ENODEV;
7436
7437 return 0;
7438 }
7439
7440 /**
7441 * lpfc_log_intr_mode - Log the active interrupt mode
7442 * @phba: pointer to lpfc hba data structure.
7443 * @intr_mode: active interrupt mode adopted.
7444 *
7445 * This routine is invoked to log the active interrupt mode currently
7446 * in use by the device.
7447 **/
7448 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
7449 {
7450 switch (intr_mode) {
7451 case 0:
7452 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7453 "0470 Enable INTx interrupt mode.\n");
7454 break;
7455 case 1:
7456 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7457 "0481 Enabled MSI interrupt mode.\n");
7458 break;
7459 case 2:
7460 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7461 "0480 Enabled MSI-X interrupt mode.\n");
7462 break;
7463 default:
7464 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7465 "0482 Illegal interrupt mode.\n");
7466 break;
7467 }
7468 return;
7469 }
7470
7471 /**
7472 * lpfc_enable_pci_dev - Enable a generic PCI device.
7473 * @phba: pointer to lpfc hba data structure.
7474 *
7475 * This routine is invoked to enable the PCI device that is common to all
7476 * PCI devices.
7477 *
7478 * Return codes
7479 * 0 - successful
7480 * other values - error
7481 **/
7482 static int
7483 lpfc_enable_pci_dev(struct lpfc_hba *phba)
7484 {
7485 struct pci_dev *pdev;
7486
7487 /* Obtain PCI device reference */
7488 if (!phba->pcidev)
7489 goto out_error;
7490 else
7491 pdev = phba->pcidev;
7492 /* Enable PCI device */
7493 if (pci_enable_device_mem(pdev))
7494 goto out_error;
7495 /* Request PCI resource for the device */
7496 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
7497 goto out_disable_device;
7498 /* Set up device as PCI master and save state for EEH */
7499 pci_set_master(pdev);
7500 pci_try_set_mwi(pdev);
7501 pci_save_state(pdev);
7502
7503 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
7504 if (pci_is_pcie(pdev))
7505 pdev->needs_freset = 1;
7506
7507 return 0;
7508
7509 out_disable_device:
7510 pci_disable_device(pdev);
7511 out_error:
7512 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7513 "1401 Failed to enable pci device\n");
7514 return -ENODEV;
7515 }
7516
7517 /**
7518 * lpfc_disable_pci_dev - Disable a generic PCI device.
7519 * @phba: pointer to lpfc hba data structure.
7520 *
7521 * This routine is invoked to disable the PCI device that is common to all
7522 * PCI devices.
7523 **/
7524 static void
7525 lpfc_disable_pci_dev(struct lpfc_hba *phba)
7526 {
7527 struct pci_dev *pdev;
7528
7529 /* Obtain PCI device reference */
7530 if (!phba->pcidev)
7531 return;
7532 else
7533 pdev = phba->pcidev;
7534 /* Release PCI resource and disable PCI device */
7535 pci_release_mem_regions(pdev);
7536 pci_disable_device(pdev);
7537
7538 return;
7539 }
7540
7541 /**
7542 * lpfc_reset_hba - Reset a hba
7543 * @phba: pointer to lpfc hba data structure.
7544 *
7545 * This routine is invoked to reset a hba device. It brings the HBA
7546 * offline, performs a board restart, and then brings the board back
7547 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean
7548 * up outstanding mailbox commands.
7549 **/
7550 void
7551 lpfc_reset_hba(struct lpfc_hba *phba)
7552 {
7553 int rc = 0;
7554
7555 /* If resets are disabled then set error state and return. */
7556 if (!phba->cfg_enable_hba_reset) {
7557 phba->link_state = LPFC_HBA_ERROR;
7558 return;
7559 }
7560
7561 /* If not LPFC_SLI_ACTIVE, force all IO to be flushed */
7562 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
7563 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
7564 } else {
7565 if (test_bit(MBX_TMO_ERR, &phba->bit_flags)) {
7566 /* Perform a PCI function reset to start from clean */
7567 rc = lpfc_pci_function_reset(phba);
7568 lpfc_els_flush_all_cmd(phba);
7569 }
7570 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
7571 lpfc_sli_flush_io_rings(phba);
7572 }
7573 lpfc_offline(phba);
7574 clear_bit(MBX_TMO_ERR, &phba->bit_flags);
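/* rc is nonzero only if the PCI function reset above failed */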
7575 if (unlikely(rc)) {
7576 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7577 "8888 PCI function reset failed rc %x\n",
7578 rc);
7579 } else {
7580 lpfc_sli_brdrestart(phba);
7581 lpfc_online(phba);
7582 lpfc_unblock_mgmt_io(phba);
7583 }
7584 }
7585
7586 /**
7587 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
7588 * @phba: pointer to lpfc hba data structure.
7589 *
7590 * This function reads the PCI SR-IOV extended capability of the
7591 * physical function, if present, and returns the total number of
7592 * virtual functions the device supports. It returns 0 when the
7593 * device has no SR-IOV capability, so callers can treat a missing
7594 * capability as "no virtual functions available".
7595 **/
7596 uint16_t
7597 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
7598 {
7599 struct pci_dev *pdev = phba->pcidev;
7600 uint16_t nr_virtfn;
7601 int pos;
7602
7603 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
7604 if (pos == 0)
7605 return 0;
7606
7607 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
7608 return nr_virtfn;
7609 }
7610
7611 /**
7612 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
7613 * @phba: pointer to lpfc hba data structure.
7614 * @nr_vfn: number of virtual functions to be enabled.
7615 *
7616 * This function enables the PCI SR-IOV virtual functions to a physical
7617 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
7618 * enable the number of virtual functions to the physical function. As
7619 * not all devices support SR-IOV, a failure return code from the
7620 * pci_enable_sriov() API call is not treated as an error for most devices.
7621 **/
7622 int
7623 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
7624 {
7625 struct pci_dev *pdev = phba->pcidev;
7626 uint16_t max_nr_vfn;
7627 int rc;
7628
7629 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
7630 if (nr_vfn > max_nr_vfn) {
7631 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7632 "3057 Requested vfs (%d) greater than "
7633 "supported vfs (%d)", nr_vfn, max_nr_vfn);
7634 return -EINVAL;
7635 }
7636
7637 rc = pci_enable_sriov(pdev, nr_vfn);
7638 if (rc) {
7639 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7640 "2806 Failed to enable sriov on this device "
7641 "with vfn number nr_vf:%d, rc:%d\n",
7642 nr_vfn, rc);
7643 } else
7644 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7645 "2807 Successful enable sriov on this device "
7646 "with vfn number nr_vf:%d\n", nr_vfn);
7647 return rc;
7648 }
7649
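/**
 * lpfc_unblock_requests_work - Unblock IO requests from worker context
 * @work: pointer to the hba's embedded unblock_request_work structure.
 *
 * Runs from phba->wq to call lpfc_unblock_requests() outside of the
 * context that queued the work item.
 */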
7650 static void
7651 lpfc_unblock_requests_work(struct work_struct *work)
7652 {
7653 struct lpfc_hba *phba = container_of(work, struct lpfc_hba,
7654 unblock_request_work);
7655
7656 lpfc_unblock_requests(phba);
7657 }
7658
7659 /**
7660 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
7661 * @phba: pointer to lpfc hba data structure.
7662 *
7663 * This routine is invoked to set up the driver internal resources before the
7664 * device specific resource setup to support the HBA device it attached to.
7665 *
7666 * Return codes
7667 * 0 - successful
7668 * other values - error
7669 **/
7670 static int
7671 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
7672 {
7673 struct lpfc_sli *psli = &phba->sli;
7674
7675 /*
7676 * Driver resources common to all SLI revisions
7677 */
7678 atomic_set(&phba->fast_event_count, 0);
7679 atomic_set(&phba->dbg_log_idx, 0);
7680 atomic_set(&phba->dbg_log_cnt, 0);
7681 atomic_set(&phba->dbg_log_dmping, 0);
7682 spin_lock_init(&phba->hbalock);
7683
7684 /* Initialize port_list spinlock */
7685 spin_lock_init(&phba->port_list_lock);
7686 INIT_LIST_HEAD(&phba->port_list);
7687
7688 INIT_LIST_HEAD(&phba->work_list);
7689
7690 /* Initialize the wait queue head for the kernel thread */
7691 init_waitqueue_head(&phba->work_waitq);
7692
7693 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7694 "1403 Protocols supported %s %s %s\n",
7695 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
7696 "SCSI" : " "),
7697 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
7698 "NVME" : " "),
7699 (phba->nvmet_support ? "NVMET" : " "));
7700
7701 /* ras_fwlog state */
7702 spin_lock_init(&phba->ras_fwlog_lock);
7703
7704 /* Initialize the IO buffer list used by driver for SLI3 SCSI */
7705 spin_lock_init(&phba->scsi_buf_list_get_lock);
7706 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
7707 spin_lock_init(&phba->scsi_buf_list_put_lock);
7708 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
7709
7710 /* Initialize the fabric iocb list */
7711 INIT_LIST_HEAD(&phba->fabric_iocb_list);
7712
7713 /* Initialize list to save ELS buffers */
7714 INIT_LIST_HEAD(&phba->elsbuf);
7715
7716 /* Initialize FCF connection rec list */
7717 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
7718
7719 /* Initialize OAS configuration list */
7720 spin_lock_init(&phba->devicelock);
7721 INIT_LIST_HEAD(&phba->luns);
7722
7723 /* MBOX heartbeat timer */
7724 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
7725 /* Fabric block timer */
7726 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
7727 /* EA polling mode timer */
7728 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
7729 /* Heartbeat timer */
7730 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
7731
7732 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
7733
7734 INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
7735 lpfc_idle_stat_delay_work);
7736 INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work);
7737 return 0;
7738 }
7739
7740 /**
7741 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
7742 * @phba: pointer to lpfc hba data structure.
7743 *
7744 * This routine is invoked to set up the driver internal resources specific to
7745 * support the SLI-3 HBA device it attached to.
7746 *
7747 * Return codes
7748 * 0 - successful
7749 * other values - error
7750 **/
7751 static int
7752 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
7753 {
7754 int rc, entry_sz;
7755
7756 /*
7757 * Initialize timers used by driver
7758 */
7759
7760 /* FCP polling mode timer */
7761 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
7762
7763 /* Host attention work mask setup */
7764 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
7765 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
7766
7767 /* Get all the module params for configuring this host */
7768 lpfc_get_cfgparam(phba);
7769 /* Set up phase-1 common device driver resources */
7770
7771 rc = lpfc_setup_driver_resource_phase1(phba);
7772 if (rc)
7773 return -ENODEV;
7774
7775 if (!phba->sli.sli3_ring)
7776 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
7777 sizeof(struct lpfc_sli_ring),
7778 GFP_KERNEL);
7779 if (!phba->sli.sli3_ring)
7780 return -ENOMEM;
7781
7782 /*
7783 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
7784 * used to create the sg_dma_buf_pool must be dynamically calculated.
7785 */
7786
7787 if (phba->sli_rev == LPFC_SLI_REV4)
7788 entry_sz = sizeof(struct sli4_sge);
7789 else
7790 entry_sz = sizeof(struct ulp_bde64);
7791
7792 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
7793 if (phba->cfg_enable_bg) {
7794 /*
7795 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
7796 * the FCP rsp, and a BDE for each. Since we have no control
7797 * over how many protection data segments the SCSI Layer
7798 * will hand us (ie: there could be one for every block
7799 * in the IO), we just allocate enough BDEs to accommodate
7800 * our max amount and we need to limit lpfc_sg_seg_cnt to
7801 * minimize the risk of running out.
7802 */
7803 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7804 sizeof(struct fcp_rsp) +
7805 (LPFC_MAX_SG_SEG_CNT * entry_sz);
7806
7807 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
7808 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
7809
7810 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
7811 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
7812 } else {
7813 /*
7814 * The scsi_buf for a regular I/O will hold the FCP cmnd,
7815 * the FCP rsp, a BDE for each, and a BDE for up to
7816 * cfg_sg_seg_cnt data segments.
7817 */
7818 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7819 sizeof(struct fcp_rsp) +
7820 ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
7821
7822 /* Total BDEs in BPL for scsi_sg_list */
7823 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
7824 }
7825
7826 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
7827 "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
7828 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
7829 phba->cfg_total_seg_cnt);
7830
7831 phba->max_vpi = LPFC_MAX_VPI;
7832 /* This will be set to correct value after config_port mbox */
7833 phba->max_vports = 0;
7834
7835 /*
7836 * Initialize the SLI Layer to run with lpfc HBAs.
7837 */
7838 lpfc_sli_setup(phba);
7839 lpfc_sli_queue_init(phba);
7840
7841 /* Allocate device driver memory */
7842 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
7843 return -ENOMEM;
7844
7845 phba->lpfc_sg_dma_buf_pool =
7846 dma_pool_create("lpfc_sg_dma_buf_pool",
7847 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
7848 BPL_ALIGN_SZ, 0);
7849
7850 if (!phba->lpfc_sg_dma_buf_pool)
7851 goto fail_free_mem;
7852
7853 phba->lpfc_cmd_rsp_buf_pool =
7854 dma_pool_create("lpfc_cmd_rsp_buf_pool",
7855 &phba->pcidev->dev,
7856 sizeof(struct fcp_cmnd) +
7857 sizeof(struct fcp_rsp),
7858 BPL_ALIGN_SZ, 0);
7859
7860 if (!phba->lpfc_cmd_rsp_buf_pool)
7861 goto fail_free_dma_buf_pool;
7862
7863 /*
7864 * Enable sr-iov virtual functions if supported and configured
7865 * through the module parameter.
7866 */
7867 if (phba->cfg_sriov_nr_virtfn > 0) {
7868 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
7869 phba->cfg_sriov_nr_virtfn);
7870 if (rc) {
7871 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7872 "2808 Requested number of SR-IOV "
7873 "virtual functions (%d) is not "
7874 "supported\n",
7875 phba->cfg_sriov_nr_virtfn);
7876 phba->cfg_sriov_nr_virtfn = 0;
7877 }
7878 }
7879
7880 return 0;
7881
7882 fail_free_dma_buf_pool:
7883 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
7884 phba->lpfc_sg_dma_buf_pool = NULL;
7885 fail_free_mem:
7886 lpfc_mem_free(phba);
7887 return -ENOMEM;
7888 }
7889
7890 /**
7891 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
7892 * @phba: pointer to lpfc hba data structure.
7893 *
7894 * This routine is invoked to unset the driver internal resources set up
7895 * specific for supporting the SLI-3 HBA device it attached to.
7896 **/
7897 static void
7898 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
7899 {
7900 /* Free device driver memory allocated */
7901 lpfc_mem_free_all(phba);
7902
7903 return;
7904 }
7905
7906 /**
7907 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
7908 * @phba: pointer to lpfc hba data structure.
7909 *
7910 * This routine is invoked to set up the driver internal resources specific to
7911 * support the SLI-4 HBA device it attached to.
7912 *
7913 * Return codes
7914 * 0 - successful
7915 * other values - error
7916 **/
7917 static int
7918 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
7919 {
7920 LPFC_MBOXQ_t *mboxq;
7921 MAILBOX_t *mb;
7922 int rc, i, max_buf_size;
7923 int longs;
7924 int extra;
7925 uint64_t wwn;
7926 u32 if_type;
7927 u32 if_fam;
7928
7929 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
7930 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
7931 phba->sli4_hba.curr_disp_cpu = 0;
7932
7933 /* Get all the module params for configuring this host */
7934 lpfc_get_cfgparam(phba);
7935
7936 /* Set up phase-1 common device driver resources */
7937 rc = lpfc_setup_driver_resource_phase1(phba);
7938 if (rc)
7939 return -ENODEV;
7940
7941 /* Before proceeding, wait for POST done and device ready */
7942 rc = lpfc_sli4_post_status_check(phba);
7943 if (rc)
7944 return -ENODEV;
7945
7946 /* Allocate all driver workqueues here */
7947
7948 /* The lpfc_wq workqueue for deferred irq use */
7949 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
7950 if (!phba->wq)
7951 return -ENOMEM;
7952
7953 /*
7954 * Initialize timers used by driver
7955 */
7956
7957 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
7958
7959 /* FCF rediscover timer */
7960 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
7961
7962 /* CMF congestion timer */
7963 hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7964 phba->cmf_timer.function = lpfc_cmf_timer;
7965 /* CMF 1 minute stats collection timer */
7966 hrtimer_init(&phba->cmf_stats_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7967 phba->cmf_stats_timer.function = lpfc_cmf_stats_timer;
7968
7969 /*
7970 * Control structure for handling external multi-buffer mailbox
7971 * command pass-through.
7972 */
7973 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
7974 sizeof(struct lpfc_mbox_ext_buf_ctx));
7975 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
7976
7977 phba->max_vpi = LPFC_MAX_VPI;
7978
7979 /* This will be set to correct value after the read_config mbox */
7980 phba->max_vports = 0;
7981
7982 /* Program the default value of vlan_id and fc_map */
7983 phba->valid_vlan = 0;
7984 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
7985 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
7986 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
7987
7988 /*
7989 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
7990 * we will associate a new ring, for each EQ/CQ/WQ tuple.
7991 * The WQ create will allocate the ring.
7992 */
7993
7994 /* Initialize buffer queue management fields */
7995 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
7996 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
7997 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
7998
7999 /* for VMID idle timeout if VMID is enabled */
8000 if (lpfc_is_vmid_enabled(phba))
8001 timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0);
8002
8003 /*
8004 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
8005 */
8006 /* Initialize the Abort buffer list used by driver */
8007 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
8008 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
8009
8010 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8011 /* Initialize the Abort nvme buffer list used by driver */
8012 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
8013 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
8014 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
8015 spin_lock_init(&phba->sli4_hba.t_active_list_lock);
8016 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
8017 }
8018
8019 /* This abort list used by worker thread */
8020 spin_lock_init(&phba->sli4_hba.sgl_list_lock);
8021 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
8022 spin_lock_init(&phba->sli4_hba.asynce_list_lock);
8023 spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);
8024
8025 /*
8026 * Initialize driver internal slow-path work queues
8027 */
8028
8029 /* Driver internal slow-path CQ Event pool */
8030 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
8031 /* Response IOCB work queue list */
8032 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
8033 /* Asynchronous event CQ Event work queue list */
8034 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
8035 /* Slow-path XRI aborted CQ Event work queue list */
8036 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
8037 /* Receive queue CQ Event work queue list */
8038 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
8039
8040 /* Initialize extent block lists. */
8041 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
8042 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
8043 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
8044 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
8045
8046 /* Initialize mboxq lists. They must be correctly initialized
8047 * even if the early init routines fail.
8048 */
8049 INIT_LIST_HEAD(&phba->sli.mboxq);
8050 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
8051
8052 /* initialize optic_state to 0xFF */
8053 phba->sli4_hba.lnk_info.optic_state = 0xff;
8054
8055 /* Allocate device driver memory */
8056 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
8057 if (rc)
8058 goto out_destroy_workqueue;
8059
8060 /* IF Type 2 ports get initialized now. */
8061 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
8062 LPFC_SLI_INTF_IF_TYPE_2) {
8063 rc = lpfc_pci_function_reset(phba);
8064 if (unlikely(rc)) {
8065 rc = -ENODEV;
8066 goto out_free_mem;
8067 }
8068 phba->temp_sensor_support = 1;
8069 }
8070
8071 /* Create the bootstrap mailbox command */
8072 rc = lpfc_create_bootstrap_mbox(phba);
8073 if (unlikely(rc))
8074 goto out_free_mem;
8075
8076 /* Set up the host's endian order with the device. */
8077 rc = lpfc_setup_endian_order(phba);
8078 if (unlikely(rc))
8079 goto out_free_bsmbx;
8080
8081 /* Set up the hba's configuration parameters. */
8082 rc = lpfc_sli4_read_config(phba);
8083 if (unlikely(rc))
8084 goto out_free_bsmbx;
8085
8086 if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
8087 /* Right now the link is down. If FA-PWWN is configured, the
8088 * firmware will try FLOGI before the driver gets a link up.
8089 * If it fails, the driver should get a MISCONFIGURED async
8090 * event which will clear this flag. The only notification
8091 * the driver gets is if it fails, if it succeeds there is no
8092 * notification given. Assume success.
8093 */
8094 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
8095 }
8096
8097 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
8098 if (unlikely(rc))
8099 goto out_free_bsmbx;
8100
8101 /* IF Type 0 ports get initialized now. */
8102 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8103 LPFC_SLI_INTF_IF_TYPE_0) {
8104 rc = lpfc_pci_function_reset(phba);
8105 if (unlikely(rc))
8106 goto out_free_bsmbx;
8107 }
8108
8109 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8110 GFP_KERNEL);
8111 if (!mboxq) {
8112 rc = -ENOMEM;
8113 goto out_free_bsmbx;
8114 }
8115
8116 /* Check for NVMET being configured */
8117 phba->nvmet_support = 0;
8118 if (lpfc_enable_nvmet_cnt) {
8119
8120 /* First get WWN of HBA instance */
8121 lpfc_read_nv(phba, mboxq);
8122 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8123 if (rc != MBX_SUCCESS) {
8124 lpfc_printf_log(phba, KERN_ERR,
8125 LOG_TRACE_EVENT,
8126 "6016 Mailbox failed , mbxCmd x%x "
8127 "READ_NV, mbxStatus x%x\n",
8128 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8129 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
8130 mempool_free(mboxq, phba->mbox_mem_pool);
8131 rc = -EIO;
8132 goto out_free_bsmbx;
8133 }
8134 mb = &mboxq->u.mb;
8135 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
8136 sizeof(uint64_t));
8137 wwn = cpu_to_be64(wwn);
8138 phba->sli4_hba.wwnn.u.name = wwn;
8139 memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
8140 sizeof(uint64_t));
8141 /* wwn is WWPN of HBA instance */
8142 wwn = cpu_to_be64(wwn);
8143 phba->sli4_hba.wwpn.u.name = wwn;
8144
8145 /* Check to see if it matches any module parameter */
8146 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
8147 if (wwn == lpfc_enable_nvmet[i]) {
8148 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
8149 if (lpfc_nvmet_mem_alloc(phba))
8150 break;
8151
8152 phba->nvmet_support = 1; /* a match */
8153
8154 lpfc_printf_log(phba, KERN_ERR,
8155 LOG_TRACE_EVENT,
8156 "6017 NVME Target %016llx\n",
8157 wwn);
8158 #else
8159 lpfc_printf_log(phba, KERN_ERR,
8160 LOG_TRACE_EVENT,
8161 "6021 Can't enable NVME Target."
8162 " NVME_TARGET_FC infrastructure"
8163 " is not in kernel\n");
8164 #endif
8165 /* Not supported for NVMET */
8166 phba->cfg_xri_rebalancing = 0;
8167 if (phba->irq_chann_mode == NHT_MODE) {
8168 phba->cfg_irq_chann =
8169 phba->sli4_hba.num_present_cpu;
8170 phba->cfg_hdw_queue =
8171 phba->sli4_hba.num_present_cpu;
8172 phba->irq_chann_mode = NORMAL_MODE;
8173 }
8174 break;
8175 }
8176 }
8177 }
8178
8179 lpfc_nvme_mod_param_dep(phba);
8180
8181 /*
8182 * Get sli4 parameters that override parameters from Port capabilities.
8183 * If this call fails, it isn't critical unless the SLI4 parameters come
8184 * back in conflict.
8185 */
8186 rc = lpfc_get_sli4_parameters(phba, mboxq);
8187 if (rc) {
8188 if_type = bf_get(lpfc_sli_intf_if_type,
8189 &phba->sli4_hba.sli_intf);
8190 if_fam = bf_get(lpfc_sli_intf_sli_family,
8191 &phba->sli4_hba.sli_intf);
8192 if (phba->sli4_hba.extents_in_use &&
8193 phba->sli4_hba.rpi_hdrs_in_use) {
8194 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8195 "2999 Unsupported SLI4 Parameters "
8196 "Extents and RPI headers enabled.\n");
8197 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8198 if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
8199 mempool_free(mboxq, phba->mbox_mem_pool);
8200 rc = -EIO;
8201 goto out_free_bsmbx;
8202 }
8203 }
8204 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8205 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
8206 mempool_free(mboxq, phba->mbox_mem_pool);
8207 rc = -EIO;
8208 goto out_free_bsmbx;
8209 }
8210 }
8211
8212 /*
8213 * 1 for cmd, 1 for rsp, NVME adds an extra one
8214 * for boundary conditions in its max_sgl_segment template.
8215 */
8216 extra = 2;
8217 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
8218 extra++;
8219
8220 /*
8221 * It doesn't matter what family our adapter is in, we are
8222 * limited to 2 Pages, 512 SGEs, for our SGL.
8223 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
8224 */
8225 max_buf_size = (2 * SLI4_PAGE_SIZE);
8226
8227 /*
8228 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
8229 * used to create the sg_dma_buf_pool must be calculated.
8230 */
8231 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8232 /* Both cfg_enable_bg and cfg_external_dif code paths */
8233
8234 /*
8235 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
8236 * the FCP rsp, and an SGE. Since we have no control
8237 * over how many protection segments the SCSI Layer
8238 * will hand us (ie: there could be one for every block
8239 * in the IO), just allocate enough SGEs to accommodate
8240 * our max amount and we need to limit lpfc_sg_seg_cnt
8241 * to minimize the risk of running out.
8242 */
8243 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8244 sizeof(struct fcp_rsp) + max_buf_size;
8245
8246 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
8247 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
8248
8249 /*
8250 * If supporting DIF, reduce the seg count for scsi to
8251 * allow room for the DIF sges.
8252 */
8253 if (phba->cfg_enable_bg &&
8254 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
8255 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
8256 else
8257 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8258
8259 } else {
8260 /*
8261 * The scsi_buf for a regular I/O holds the FCP cmnd,
8262 * the FCP rsp, a SGE for each, and a SGE for up to
8263 * cfg_sg_seg_cnt data segments.
8264 */
8265 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8266 sizeof(struct fcp_rsp) +
8267 ((phba->cfg_sg_seg_cnt + extra) *
8268 sizeof(struct sli4_sge));
8269
8270 /* Total SGEs for scsi_sg_list */
8271 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
8272 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8273
8274 /*
8275 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
8276 * need to post 1 page for the SGL.
8277 */
8278 }
8279
8280 if (phba->cfg_xpsgl && !phba->nvmet_support)
8281 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
8282 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
8283 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
8284 else
8285 phba->cfg_sg_dma_buf_size =
8286 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
8287
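/* Number of SGEs that fit in a single sg_dma_buf allocation */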
8288 phba->border_sge_num = phba->cfg_sg_dma_buf_size /
8289 sizeof(struct sli4_sge);
8290
8291 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
8292 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8293 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
8294 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
8295 "6300 Reducing NVME sg segment "
8296 "cnt to %d\n",
8297 LPFC_MAX_NVME_SEG_CNT);
8298 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
8299 } else
8300 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
8301 }
8302
8303 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
8304 "9087 sg_seg_cnt:%d dmabuf_size:%d "
8305 "total:%d scsi:%d nvme:%d\n",
8306 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
8307 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
8308 phba->cfg_nvme_seg_cnt);
8309
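/* Pool alignment is the buffer size itself, capped at one SLI4 page */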
8310 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
8311 i = phba->cfg_sg_dma_buf_size;
8312 else
8313 i = SLI4_PAGE_SIZE;
8314
8315 phba->lpfc_sg_dma_buf_pool =
8316 dma_pool_create("lpfc_sg_dma_buf_pool",
8317 &phba->pcidev->dev,
8318 phba->cfg_sg_dma_buf_size,
8319 i, 0);
8320 if (!phba->lpfc_sg_dma_buf_pool) {
8321 rc = -ENOMEM;
8322 goto out_free_bsmbx;
8323 }
8324
8325 phba->lpfc_cmd_rsp_buf_pool =
8326 dma_pool_create("lpfc_cmd_rsp_buf_pool",
8327 &phba->pcidev->dev,
8328 sizeof(struct fcp_cmnd) +
8329 sizeof(struct fcp_rsp),
8330 i, 0);
8331 if (!phba->lpfc_cmd_rsp_buf_pool) {
8332 rc = -ENOMEM;
8333 goto out_free_sg_dma_buf;
8334 }
8335
8336 mempool_free(mboxq, phba->mbox_mem_pool);
8337
8338 /* Verify OAS is supported */
8339 lpfc_sli4_oas_verify(phba);
8340
8341 /* Verify RAS support on adapter */
8342 lpfc_sli4_ras_init(phba);
8343
8344 /* Verify all the SLI4 queues */
8345 rc = lpfc_sli4_queue_verify(phba);
8346 if (rc)
8347 goto out_free_cmd_rsp_buf;
8348
8349 /* Create driver internal CQE event pool */
8350 rc = lpfc_sli4_cq_event_pool_create(phba);
8351 if (rc)
8352 goto out_free_cmd_rsp_buf;
8353
8354 /* Initialize sgl lists per host */
8355 lpfc_init_sgl_list(phba);
8356
8357 /* Allocate and initialize active sgl array */
8358 rc = lpfc_init_active_sgl_array(phba);
8359 if (rc) {
8360 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8361 "1430 Failed to initialize sgl list.\n");
8362 goto out_destroy_cq_event_pool;
8363 }
8364 rc = lpfc_sli4_init_rpi_hdrs(phba);
8365 if (rc) {
8366 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8367 "1432 Failed to initialize rpi headers.\n");
8368 goto out_free_active_sgl;
8369 }
8370
8371 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
8372 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
8373 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
8374 GFP_KERNEL);
8375 if (!phba->fcf.fcf_rr_bmask) {
8376 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8377 "2759 Failed allocate memory for FCF round "
8378 "robin failover bmask\n");
8379 rc = -ENOMEM;
8380 goto out_remove_rpi_hdrs;
8381 }
8382
8383 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
8384 sizeof(struct lpfc_hba_eq_hdl),
8385 GFP_KERNEL);
8386 if (!phba->sli4_hba.hba_eq_hdl) {
8387 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8388 "2572 Failed allocate memory for "
8389 "fast-path per-EQ handle array\n");
8390 rc = -ENOMEM;
8391 goto out_free_fcf_rr_bmask;
8392 }
8393
8394 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
8395 sizeof(struct lpfc_vector_map_info),
8396 GFP_KERNEL);
8397 if (!phba->sli4_hba.cpu_map) {
8398 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8399 "3327 Failed allocate memory for msi-x "
8400 "interrupt vector mapping\n");
8401 rc = -ENOMEM;
8402 goto out_free_hba_eq_hdl;
8403 }
8404
8405 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
8406 if (!phba->sli4_hba.eq_info) {
8407 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8408 "3321 Failed allocation for per_cpu stats\n");
8409 rc = -ENOMEM;
8410 goto out_free_hba_cpu_map;
8411 }
8412
8413 phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
8414 sizeof(*phba->sli4_hba.idle_stat),
8415 GFP_KERNEL);
8416 if (!phba->sli4_hba.idle_stat) {
8417 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8418 "3390 Failed allocation for idle_stat\n");
8419 rc = -ENOMEM;
8420 goto out_free_hba_eq_info;
8421 }
8422
8423 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8424 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
8425 if (!phba->sli4_hba.c_stat) {
8426 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8427 "3332 Failed allocating per cpu hdwq stats\n");
8428 rc = -ENOMEM;
8429 goto out_free_hba_idle_stat;
8430 }
8431 #endif
8432
8433 phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat);
8434 if (!phba->cmf_stat) {
8435 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8436 "3331 Failed allocating per cpu cgn stats\n");
8437 rc = -ENOMEM;
8438 goto out_free_hba_hdwq_info;
8439 }
8440
8441 /*
8442 * Enable sr-iov virtual functions if supported and configured
8443 * through the module parameter.
8444 */
8445 if (phba->cfg_sriov_nr_virtfn > 0) {
8446 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
8447 phba->cfg_sriov_nr_virtfn);
8448 if (rc) {
8449 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8450 "3020 Requested number of SR-IOV "
8451 "virtual functions (%d) is not "
8452 "supported\n",
8453 phba->cfg_sriov_nr_virtfn);
8454 phba->cfg_sriov_nr_virtfn = 0;
8455 }
8456 }
8457
8458 return 0;
8459
8460 out_free_hba_hdwq_info:
8461 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8462 free_percpu(phba->sli4_hba.c_stat);
8463 out_free_hba_idle_stat:
8464 #endif
8465 kfree(phba->sli4_hba.idle_stat);
8466 out_free_hba_eq_info:
8467 free_percpu(phba->sli4_hba.eq_info);
8468 out_free_hba_cpu_map:
8469 kfree(phba->sli4_hba.cpu_map);
8470 out_free_hba_eq_hdl:
8471 kfree(phba->sli4_hba.hba_eq_hdl);
8472 out_free_fcf_rr_bmask:
8473 kfree(phba->fcf.fcf_rr_bmask);
8474 out_remove_rpi_hdrs:
8475 lpfc_sli4_remove_rpi_hdrs(phba);
8476 out_free_active_sgl:
8477 lpfc_free_active_sgl(phba);
8478 out_destroy_cq_event_pool:
8479 lpfc_sli4_cq_event_pool_destroy(phba);
8480 out_free_cmd_rsp_buf:
8481 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
8482 phba->lpfc_cmd_rsp_buf_pool = NULL;
8483 out_free_sg_dma_buf:
8484 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
8485 phba->lpfc_sg_dma_buf_pool = NULL;
8486 out_free_bsmbx:
8487 lpfc_destroy_bootstrap_mbox(phba);
8488 out_free_mem:
8489 lpfc_mem_free(phba);
8490 out_destroy_workqueue:
8491 destroy_workqueue(phba->wq);
8492 phba->wq = NULL;
8493 return rc;
8494 }
8495
8496 /**
8497 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
8498 * @phba: pointer to lpfc hba data structure.
8499 *
8500 * This routine is invoked to unset the driver internal resources set up
8501 * specific for supporting the SLI-4 HBA device it attached to.
8502 **/
8503 static void
8504 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
8505 {
8506 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
8507
8508 free_percpu(phba->sli4_hba.eq_info);
8509 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8510 free_percpu(phba->sli4_hba.c_stat);
8511 #endif
8512 free_percpu(phba->cmf_stat);
8513 kfree(phba->sli4_hba.idle_stat);
8514
8515 /* Free memory allocated for msi-x interrupt vector to CPU mapping */
8516 kfree(phba->sli4_hba.cpu_map);
8517 phba->sli4_hba.num_possible_cpu = 0;
8518 phba->sli4_hba.num_present_cpu = 0;
8519 phba->sli4_hba.curr_disp_cpu = 0;
8520 cpumask_clear(&phba->sli4_hba.irq_aff_mask);
8521
8522 /* Free memory allocated for fast-path work queue handles */
8523 kfree(phba->sli4_hba.hba_eq_hdl);
8524
8525 /* Free the allocated rpi headers. */
8526 lpfc_sli4_remove_rpi_hdrs(phba);
8527 lpfc_sli4_remove_rpis(phba);
8528
8529 /* Free eligible FCF index bmask */
8530 kfree(phba->fcf.fcf_rr_bmask);
8531
8532 /* Free the ELS sgl list */
8533 lpfc_free_active_sgl(phba);
8534 lpfc_free_els_sgl_list(phba);
8535 lpfc_free_nvmet_sgl_list(phba);
8536
8537 /* Free the completion queue EQ event pool */
8538 lpfc_sli4_cq_event_release_all(phba);
8539 lpfc_sli4_cq_event_pool_destroy(phba);
8540
8541 /* Release resource identifiers. */
8542 lpfc_sli4_dealloc_resource_identifiers(phba);
8543
8544 /* Free the bsmbx region. */
8545 lpfc_destroy_bootstrap_mbox(phba);
8546
8547 /* Free the SLI Layer memory with SLI4 HBAs */
8548 lpfc_mem_free_all(phba);
8549
8550 /* Free the current connect table */
8551 list_for_each_entry_safe(conn_entry, next_conn_entry,
8552 &phba->fcf_conn_rec_list, list) {
8553 list_del_init(&conn_entry->list);
8554 kfree(conn_entry);
8555 }
8556
8557 return;
8558 }
8559
8560 /**
8561 * lpfc_init_api_table_setup - Set up init api function jump table
8562 * @phba: The hba struct for which this call is being executed.
8563 * @dev_grp: The HBA PCI-Device group number.
8564 *
8565 * This routine sets up the device INIT interface API function jump table
8566 * in @phba struct.
8567 *
8568 * Returns: 0 - success, -ENODEV - failure.
8569 **/
8570 int
8571 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8572 {
8573 phba->lpfc_hba_init_link = lpfc_hba_init_link;
8574 phba->lpfc_hba_down_link = lpfc_hba_down_link;
8575 phba->lpfc_selective_reset = lpfc_selective_reset;
8576 switch (dev_grp) {
8577 case LPFC_PCI_DEV_LP:
8578 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
8579 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
8580 phba->lpfc_stop_port = lpfc_stop_port_s3;
8581 break;
8582 case LPFC_PCI_DEV_OC:
8583 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
8584 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
8585 phba->lpfc_stop_port = lpfc_stop_port_s4;
8586 break;
8587 default:
8588 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8589 "1431 Invalid HBA PCI-device group: 0x%x\n",
8590 dev_grp);
8591 return -ENODEV;
8592 }
8593 return 0;
8594 }
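/* Usage sketch (illustrative only): once this table is populated, the
 * rest of the driver dispatches through the jump table without caring
 * which PCI device group is present, e.g.:
 *
 *	phba->lpfc_stop_port(phba);
 *	phba->lpfc_handle_eratt(phba);
 *
 * resolve to the _s3 handlers on SLI-3 parts and the _s4 handlers on
 * SLI-4 parts.
 */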
8595
8596 /**
8597 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
8598 * @phba: pointer to lpfc hba data structure.
8599 *
8600 * This routine is invoked to set up the driver internal resources after the
8601 * device specific resource setup to support the HBA device it is attached to.
8602 *
8603 * Return codes
8604 * 0 - successful
8605 * other values - error
8606 **/
8607 static int
8608 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
8609 {
8610 int error;
8611
8612 /* Startup the kernel thread for this host adapter. */
8613 phba->worker_thread = kthread_run(lpfc_do_work, phba,
8614 "lpfc_worker_%d", phba->brd_no);
8615 if (IS_ERR(phba->worker_thread)) {
8616 error = PTR_ERR(phba->worker_thread);
8617 return error;
8618 }
8619
8620 return 0;
8621 }
8622
8623 /**
8624 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
8625 * @phba: pointer to lpfc hba data structure.
8626 *
8627 * This routine is invoked to unset the driver internal resources set up after
8628 * the device specific resource setup for supporting the HBA device it is
8629 * attached to.
8630 **/
8631 static void
8632 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
8633 {
8634 if (phba->wq) {
8635 destroy_workqueue(phba->wq);
8636 phba->wq = NULL;
8637 }
8638
8639 /* Stop kernel worker thread */
8640 if (phba->worker_thread)
8641 kthread_stop(phba->worker_thread);
8642 }
8643
8644 /**
8645 * lpfc_free_iocb_list - Free iocb list.
8646 * @phba: pointer to lpfc hba data structure.
8647 *
8648 * This routine is invoked to free the driver's IOCB list and memory.
8649 **/
8650 void
8651 lpfc_free_iocb_list(struct lpfc_hba *phba)
8652 {
8653 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
8654
8655 spin_lock_irq(&phba->hbalock);
8656 list_for_each_entry_safe(iocbq_entry, iocbq_next,
8657 &phba->lpfc_iocb_list, list) {
8658 list_del(&iocbq_entry->list);
8659 kfree(iocbq_entry);
8660 phba->total_iocbq_bufs--;
8661 }
8662 spin_unlock_irq(&phba->hbalock);
8663
8664 return;
8665 }
8666
8667 /**
8668 * lpfc_init_iocb_list - Allocate and initialize iocb list.
8669 * @phba: pointer to lpfc hba data structure.
8670 * @iocb_count: number of requested iocbs
8671 *
8672 * This routine is invoked to allocate and initialize the driver's IOCB
8673 * list and set up the IOCB tag array accordingly.
8674 *
8675 * Return codes
8676 * 0 - successful
8677 * other values - error
8678 **/
8679 int
8680 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
8681 {
8682 struct lpfc_iocbq *iocbq_entry = NULL;
8683 uint16_t iotag;
8684 int i;
8685
8686 /* Initialize and populate the iocb list per host. */
8687 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
8688 for (i = 0; i < iocb_count; i++) {
8689 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
8690 if (iocbq_entry == NULL) {
8691 printk(KERN_ERR "%s: only allocated %d iocbs of "
8692 "expected %d count. Unloading driver.\n",
8693 __func__, i, iocb_count);
8694 goto out_free_iocbq;
8695 }
8696
8697 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
8698 if (iotag == 0) {
8699 kfree(iocbq_entry);
8700 printk(KERN_ERR "%s: failed to allocate IOTAG. "
8701 "Unloading driver.\n", __func__);
8702 goto out_free_iocbq;
8703 }
8704 iocbq_entry->sli4_lxritag = NO_XRI;
8705 iocbq_entry->sli4_xritag = NO_XRI;
8706
8707 spin_lock_irq(&phba->hbalock);
8708 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
8709 phba->total_iocbq_bufs++;
8710 spin_unlock_irq(&phba->hbalock);
8711 }
8712
8713 return 0;
8714
8715 out_free_iocbq:
8716 lpfc_free_iocb_list(phba);
8717
8718 return -ENOMEM;
8719 }
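/* Illustrative call site (the count macro is the driver's usual SLI-3
 * default and is an assumption here):
 *
 *	if (lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT))
 *		return -ENOMEM;
 *
 * Each iocbq is assigned an iotag via lpfc_sli_next_iotag() so a
 * completion can be matched back to its command by tag.
 */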
8720
8721 /**
8722 * lpfc_free_sgl_list - Free a given sgl list.
8723 * @phba: pointer to lpfc hba data structure.
8724 * @sglq_list: pointer to the head of sgl list.
8725 *
8726 * This routine is invoked to free a given sgl list and its memory.
8727 **/
8728 void
8729 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
8730 {
8731 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8732
8733 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
8734 list_del(&sglq_entry->list);
8735 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
8736 kfree(sglq_entry);
8737 }
8738 }
8739
8740 /**
8741 * lpfc_free_els_sgl_list - Free els sgl list.
8742 * @phba: pointer to lpfc hba data structure.
8743 *
8744 * This routine is invoked to free the driver's els sgl list and memory.
8745 **/
8746 static void
8747 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
8748 {
8749 LIST_HEAD(sglq_list);
8750
8751 /* Retrieve all els sgls from driver list */
8752 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
8753 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
8754 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
8755
8756 /* Now free the sgl list */
8757 lpfc_free_sgl_list(phba, &sglq_list);
8758 }
8759
8760 /**
8761 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
8762 * @phba: pointer to lpfc hba data structure.
8763 *
8764 * This routine is invoked to free the driver's nvmet sgl list and memory.
8765 **/
8766 static void
8767 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
8768 {
8769 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8770 LIST_HEAD(sglq_list);
8771
8772 /* Retrieve all nvmet sgls from driver list */
8773 spin_lock_irq(&phba->hbalock);
8774 spin_lock(&phba->sli4_hba.sgl_list_lock);
8775 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
8776 spin_unlock(&phba->sli4_hba.sgl_list_lock);
8777 spin_unlock_irq(&phba->hbalock);
8778
8779 /* Now free the sgl list */
8780 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
8781 list_del(&sglq_entry->list);
8782 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
8783 kfree(sglq_entry);
8784 }
8785
8786 /* Update the nvmet_xri_cnt to reflect no current sgls.
8787 * The next initialization cycle sets the count and allocates
8788 * the sgls over again.
8789 */
8790 phba->sli4_hba.nvmet_xri_cnt = 0;
8791 }
8792
8793 /**
8794 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
8795 * @phba: pointer to lpfc hba data structure.
8796 *
8797 * This routine is invoked to allocate the driver's active sgl memory.
8798 * This array will hold the sglq_entry's for active IOs.
8799 **/
8800 static int
8801 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
8802 {
8803 int size;
8804 size = sizeof(struct lpfc_sglq *);
8805 size *= phba->sli4_hba.max_cfg_param.max_xri;
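/* For example, with max_xri = 4096 on a 64-bit kernel this is
 * 4096 * sizeof(struct lpfc_sglq *) = 32KB of pointer storage.
 */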
8806
8807 phba->sli4_hba.lpfc_sglq_active_list =
8808 kzalloc(size, GFP_KERNEL);
8809 if (!phba->sli4_hba.lpfc_sglq_active_list)
8810 return -ENOMEM;
8811 return 0;
8812 }
8813
8814 /**
8815 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
8816 * @phba: pointer to lpfc hba data structure.
8817 *
8818 * This routine is invoked to walk through the array of active sglq entries
8819 * and free all of the resources.
8820 * This is just a placeholder for now.
8821 **/
8822 static void
8823 lpfc_free_active_sgl(struct lpfc_hba *phba)
8824 {
8825 kfree(phba->sli4_hba.lpfc_sglq_active_list);
8826 }
8827
8828 /**
8829 * lpfc_init_sgl_list - Allocate and initialize sgl list.
8830 * @phba: pointer to lpfc hba data structure.
8831 *
8832 * This routine is invoked to allocate and initialize the driver's sgl
8833 * list and set up the sgl xritag array accordingly.
8834 *
8835 **/
8836 static void
8837 lpfc_init_sgl_list(struct lpfc_hba *phba)
8838 {
8839 /* Initialize and populate the sglq list per host/VF. */
8840 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
8841 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8842 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
8843 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
8844
8845 /* els xri-sgl book keeping */
8846 phba->sli4_hba.els_xri_cnt = 0;
8847
8848 /* nvme xri-buffer book keeping */
8849 phba->sli4_hba.io_xri_cnt = 0;
8850 }
8851
8852 /**
8853 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
8854 * @phba: pointer to lpfc hba data structure.
8855 *
8856 * This routine is invoked to post rpi header templates to the
8857 * port for those SLI4 ports that do not support extents. This routine
8858 * posts a PAGE_SIZE memory region to the port to hold up to
8859 * PAGE_SIZE / 64 rpi context headers. This is an initialization routine
8860 * and should be called only when interrupts are disabled.
8861 *
8862 * Return codes
8863 * 0 - successful
8864 * -ERROR - otherwise.
8865 **/
8866 int
8867 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
8868 {
8869 int rc = 0;
8870 struct lpfc_rpi_hdr *rpi_hdr;
8871
8872 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
8873 if (!phba->sli4_hba.rpi_hdrs_in_use)
8874 return rc;
8875 if (phba->sli4_hba.extents_in_use)
8876 return -EIO;
8877
8878 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
8879 if (!rpi_hdr) {
8880 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8881 "0391 Error during rpi post operation\n");
8882 lpfc_sli4_remove_rpis(phba);
8883 rc = -ENODEV;
8884 }
8885
8886 return rc;
8887 }
8888
8889 /**
8890 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
8891 * @phba: pointer to lpfc hba data structure.
8892 *
8893 * This routine is invoked to allocate a single 4KB memory region to
8894 * support rpis and stores them in the phba. This single region
8895 * provides support for up to 64 rpis. The region is used globally
8896 * by the device.
8897 *
8898 * Returns:
8899 * A valid rpi hdr on success.
8900 * A NULL pointer on any failure.
8901 **/
8902 struct lpfc_rpi_hdr *
8903 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
8904 {
8905 uint16_t rpi_limit, curr_rpi_range;
8906 struct lpfc_dmabuf *dmabuf;
8907 struct lpfc_rpi_hdr *rpi_hdr;
8908
8909 /*
8910 * If the SLI4 port supports extents, posting the rpi header isn't
8911 * required. Set the expected maximum count and let the actual value
8912 * get set when extents are fully allocated.
8913 */
8914 if (!phba->sli4_hba.rpi_hdrs_in_use)
8915 return NULL;
8916 if (phba->sli4_hba.extents_in_use)
8917 return NULL;
8918
8919 /* The limit on the logical index is just the max_rpi count. */
8920 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
8921
8922 spin_lock_irq(&phba->hbalock);
8923 /*
8924 * Establish the starting RPI in this header block. The starting
8925 * rpi is normalized to a zero base because the physical rpi is
8926 * port based.
8927 */
8928 curr_rpi_range = phba->sli4_hba.next_rpi;
8929 spin_unlock_irq(&phba->hbalock);
8930
8931 /* Reached full RPI range */
8932 if (curr_rpi_range == rpi_limit)
8933 return NULL;
8934
8935 /*
8936 * First allocate the protocol header region for the port. The
8937 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
8938 */
8939 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
8940 if (!dmabuf)
8941 return NULL;
8942
8943 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
8944 LPFC_HDR_TEMPLATE_SIZE,
8945 &dmabuf->phys, GFP_KERNEL);
8946 if (!dmabuf->virt) {
8947 rpi_hdr = NULL;
8948 goto err_free_dmabuf;
8949 }
8950
8951 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
8952 rpi_hdr = NULL;
8953 goto err_free_coherent;
8954 }
8955
8956 /* Save the rpi header data for cleanup later. */
8957 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
8958 if (!rpi_hdr)
8959 goto err_free_coherent;
8960
8961 rpi_hdr->dmabuf = dmabuf;
8962 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
8963 rpi_hdr->page_count = 1;
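/* Sizing sketch (assumes the usual 64-byte rpi context entry): one
 * LPFC_HDR_TEMPLATE_SIZE (4KB) page holds LPFC_RPI_HDR_COUNT (64)
 * rpi context headers, so next_rpi below advances by 64 per page.
 */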
8964 spin_lock_irq(&phba->hbalock);
8965
8966 /* The rpi_hdr stores the logical index only. */
8967 rpi_hdr->start_rpi = curr_rpi_range;
8968 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
8969 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
8970
8971 spin_unlock_irq(&phba->hbalock);
8972 return rpi_hdr;
8973
8974 err_free_coherent:
8975 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
8976 dmabuf->virt, dmabuf->phys);
8977 err_free_dmabuf:
8978 kfree(dmabuf);
8979 return NULL;
8980 }
8981
8982 /**
8983 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
8984 * @phba: pointer to lpfc hba data structure.
8985 *
8986 * This routine is invoked to remove all memory resources allocated
8987 * to support rpis for SLI4 ports not supporting extents. This routine
8988 * presumes the caller has released all rpis consumed by fabric or port
8989 * logins and is prepared to have the header pages removed.
8990 **/
8991 void
8992 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
8993 {
8994 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
8995
8996 if (!phba->sli4_hba.rpi_hdrs_in_use)
8997 goto exit;
8998
8999 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
9000 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
9001 list_del(&rpi_hdr->list);
9002 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
9003 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
9004 kfree(rpi_hdr->dmabuf);
9005 kfree(rpi_hdr);
9006 }
9007 exit:
9008 /* There are no rpis available to the port now. */
9009 phba->sli4_hba.next_rpi = 0;
9010 }
9011
9012 /**
9013 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
9014 * @pdev: pointer to pci device data structure.
9015 *
9016 * This routine is invoked to allocate the driver hba data structure for an
9017 * HBA device. If the allocation is successful, the phba reference to the
9018 * PCI device data structure is set.
9019 *
9020 * Return codes
9021 * pointer to @phba - successful
9022 * NULL - error
9023 **/
9024 static struct lpfc_hba *
9025 lpfc_hba_alloc(struct pci_dev *pdev)
9026 {
9027 struct lpfc_hba *phba;
9028
9029 /* Allocate memory for HBA structure */
9030 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
9031 if (!phba) {
9032 dev_err(&pdev->dev, "failed to allocate hba struct\n");
9033 return NULL;
9034 }
9035
9036 /* Set reference to PCI device in HBA structure */
9037 phba->pcidev = pdev;
9038
9039 /* Assign an unused board number */
9040 phba->brd_no = lpfc_get_instance();
9041 if (phba->brd_no < 0) {
9042 kfree(phba);
9043 return NULL;
9044 }
9045 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
9046
9047 spin_lock_init(&phba->ct_ev_lock);
9048 INIT_LIST_HEAD(&phba->ct_ev_waiters);
9049
9050 return phba;
9051 }
9052
9053 /**
9054 * lpfc_hba_free - Free driver hba data structure with a device.
9055 * @phba: pointer to lpfc hba data structure.
9056 *
9057 * This routine is invoked to free the driver hba data structure with an
9058 * HBA device.
9059 **/
9060 static void
9061 lpfc_hba_free(struct lpfc_hba *phba)
9062 {
9063 if (phba->sli_rev == LPFC_SLI_REV4)
9064 kfree(phba->sli4_hba.hdwq);
9065
9066 /* Release the driver assigned board number */
9067 idr_remove(&lpfc_hba_index, phba->brd_no);
9068
9069 /* Free memory allocated with sli3 rings */
9070 kfree(phba->sli.sli3_ring);
9071 phba->sli.sli3_ring = NULL;
9072
9073 kfree(phba);
9074 return;
9075 }
9076
9077 /**
9078 * lpfc_setup_fdmi_mask - Setup initial FDMI mask for HBA and Port attributes
9079 * @vport: pointer to lpfc vport data structure.
9080 *
9081 * This routine will set up the initial FDMI attribute masks for
9082 * FDMI2 or SmartSAN depending on module parameters. The driver will attempt
9083 * to get these attributes first before falling back; the attribute
9084 * fallback hierarchy is SmartSAN -> FDMI2 -> FDMI1
9085 **/
9086 void
9087 lpfc_setup_fdmi_mask(struct lpfc_vport *vport)
9088 {
9089 struct lpfc_hba *phba = vport->phba;
9090
9091 vport->load_flag |= FC_ALLOW_FDMI;
9092 if (phba->cfg_enable_SmartSAN ||
9093 phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) {
9094 /* Setup appropriate attribute masks */
9095 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
9096 if (phba->cfg_enable_SmartSAN)
9097 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
9098 else
9099 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
9100 }
9101
9102 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
9103 "6077 Setup FDMI mask: hba x%x port x%x\n",
9104 vport->fdmi_hba_mask, vport->fdmi_port_mask);
9105 }
9106
9107 /**
9108 * lpfc_create_shost - Create hba physical port with associated scsi host.
9109 * @phba: pointer to lpfc hba data structure.
9110 *
9111 * This routine is invoked to create HBA physical port and associate a SCSI
9112 * host with it.
9113 *
9114 * Return codes
9115 * 0 - successful
9116 * other values - error
9117 **/
9118 static int
9119 lpfc_create_shost(struct lpfc_hba *phba)
9120 {
9121 struct lpfc_vport *vport;
9122 struct Scsi_Host *shost;
9123
9124 /* Initialize HBA FC structure */
9125 phba->fc_edtov = FF_DEF_EDTOV;
9126 phba->fc_ratov = FF_DEF_RATOV;
9127 phba->fc_altov = FF_DEF_ALTOV;
9128 phba->fc_arbtov = FF_DEF_ARBTOV;
9129
9130 atomic_set(&phba->sdev_cnt, 0);
9131 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
9132 if (!vport)
9133 return -ENODEV;
9134
9135 shost = lpfc_shost_from_vport(vport);
9136 phba->pport = vport;
9137
9138 if (phba->nvmet_support) {
9139 /* Only 1 vport (pport) will support NVME target */
9140 phba->targetport = NULL;
9141 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
9142 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
9143 "6076 NVME Target Found\n");
9144 }
9145
9146 lpfc_debugfs_initialize(vport);
9147 /* Put reference to SCSI host to driver's device private data */
9148 pci_set_drvdata(phba->pcidev, shost);
9149
9150 lpfc_setup_fdmi_mask(vport);
9151
9152 /*
9153 * At this point we are fully registered with PSA. In addition,
9154 * any initial discovery should be completed.
9155 */
9156 return 0;
9157 }
9158
9159 /**
9160 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
9161 * @phba: pointer to lpfc hba data structure.
9162 *
9163 * This routine is invoked to destroy HBA physical port and the associated
9164 * SCSI host.
9165 **/
9166 static void
9167 lpfc_destroy_shost(struct lpfc_hba *phba)
9168 {
9169 struct lpfc_vport *vport = phba->pport;
9170
9171 /* Destroy physical port that associated with the SCSI host */
9172 destroy_port(vport);
9173
9174 return;
9175 }
9176
9177 /**
9178 * lpfc_setup_bg - Setup Block guard structures and debug areas.
9179 * @phba: pointer to lpfc hba data structure.
9180 * @shost: the shost to be used to detect Block guard settings.
9181 *
9182 * This routine sets up the local Block guard protocol settings for @shost.
9183 * This routine also allocates memory for debugging bg buffers.
9184 **/
9185 static void
9186 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
9187 {
9188 uint32_t old_mask;
9189 uint32_t old_guard;
9190
9191 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9192 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9193 "1478 Registering BlockGuard with the "
9194 "SCSI layer\n");
9195
9196 old_mask = phba->cfg_prot_mask;
9197 old_guard = phba->cfg_prot_guard;
9198
9199 /* Only allow supported values */
9200 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
9201 SHOST_DIX_TYPE0_PROTECTION |
9202 SHOST_DIX_TYPE1_PROTECTION);
9203 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
9204 SHOST_DIX_GUARD_CRC);
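/* For example, a user-requested SHOST_DIF_TYPE2_PROTECTION bit is
 * silently dropped by the masking above, since only the DIF/DIX
 * type 0/1 combinations listed are supported by this driver.
 */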
9205
9206 /* DIF Type 1 protection for profiles AST1/C1 is end to end */
9207 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
9208 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
9209
9210 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9211 if ((old_mask != phba->cfg_prot_mask) ||
9212 (old_guard != phba->cfg_prot_guard))
9213 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9214 "1475 Registering BlockGuard with the "
9215 "SCSI layer: mask %d guard %d\n",
9216 phba->cfg_prot_mask,
9217 phba->cfg_prot_guard);
9218
9219 scsi_host_set_prot(shost, phba->cfg_prot_mask);
9220 scsi_host_set_guard(shost, phba->cfg_prot_guard);
9221 } else
9222 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9223 "1479 Not Registering BlockGuard with the SCSI "
9224 "layer, Bad protection parameters: %d %d\n",
9225 old_mask, old_guard);
9226 }
9227 }
9228
9229 /**
9230 * lpfc_post_init_setup - Perform necessary device post initialization setup.
9231 * @phba: pointer to lpfc hba data structure.
9232 *
9233 * This routine is invoked to perform all the necessary post initialization
9234 * setup for the device.
9235 **/
9236 static void
9237 lpfc_post_init_setup(struct lpfc_hba *phba)
9238 {
9239 struct Scsi_Host *shost;
9240 struct lpfc_adapter_event_header adapter_event;
9241
9242 /* Get the default values for Model Name and Description */
9243 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9244
9245 /*
9246 * hba setup may have changed the hba_queue_depth so we need to
9247 * adjust the value of can_queue.
9248 */
9249 shost = pci_get_drvdata(phba->pcidev);
9250 shost->can_queue = phba->cfg_hba_queue_depth - 10;
9251
9252 lpfc_host_attrib_init(shost);
9253
9254 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9255 spin_lock_irq(shost->host_lock);
9256 lpfc_poll_start_timer(phba);
9257 spin_unlock_irq(shost->host_lock);
9258 }
9259
9260 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9261 "0428 Perform SCSI scan\n");
9262 /* Send board arrival event to upper layer */
9263 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
9264 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
9265 fc_host_post_vendor_event(shost, fc_get_event_number(),
9266 sizeof(adapter_event),
9267 (char *) &adapter_event,
9268 LPFC_NL_VENDOR_ID);
9269 return;
9270 }
9271
9272 /**
9273 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
9274 * @phba: pointer to lpfc hba data structure.
9275 *
9276 * This routine is invoked to set up the PCI device memory space for device
9277 * with SLI-3 interface spec.
9278 *
9279 * Return codes
9280 * 0 - successful
9281 * other values - error
9282 **/
9283 static int
9284 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
9285 {
9286 struct pci_dev *pdev = phba->pcidev;
9287 unsigned long bar0map_len, bar2map_len;
9288 int i, hbq_count;
9289 void *ptr;
9290 int error;
9291
9292 if (!pdev)
9293 return -ENODEV;
9294
9295 /* Set the device DMA mask size */
9296 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9297 if (error)
9298 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9299 if (error)
9300 return error;
9301 error = -ENODEV;
9302
9303 /* Get the bus address of Bar0 and Bar2 and the number of bytes
9304 * required by each mapping.
9305 */
9306 phba->pci_bar0_map = pci_resource_start(pdev, 0);
9307 bar0map_len = pci_resource_len(pdev, 0);
9308
9309 phba->pci_bar2_map = pci_resource_start(pdev, 2);
9310 bar2map_len = pci_resource_len(pdev, 2);
9311
9312 /* Map HBA SLIM to a kernel virtual address. */
9313 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
9314 if (!phba->slim_memmap_p) {
9315 dev_printk(KERN_ERR, &pdev->dev,
9316 "ioremap failed for SLIM memory.\n");
9317 goto out;
9318 }
9319
9320 /* Map HBA Control Registers to a kernel virtual address. */
9321 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
9322 if (!phba->ctrl_regs_memmap_p) {
9323 dev_printk(KERN_ERR, &pdev->dev,
9324 "ioremap failed for HBA control registers.\n");
9325 goto out_iounmap_slim;
9326 }
9327
9328 /* Allocate memory for SLI-2 structures */
9329 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9330 &phba->slim2p.phys, GFP_KERNEL);
9331 if (!phba->slim2p.virt)
9332 goto out_iounmap;
9333
9334 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
9335 phba->mbox_ext = (phba->slim2p.virt +
9336 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
9337 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
9338 phba->IOCBs = (phba->slim2p.virt +
9339 offsetof(struct lpfc_sli2_slim, IOCBs));
9340
9341 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
9342 lpfc_sli_hbq_size(),
9343 &phba->hbqslimp.phys,
9344 GFP_KERNEL);
9345 if (!phba->hbqslimp.virt)
9346 goto out_free_slim;
9347
9348 hbq_count = lpfc_sli_hbq_count();
9349 ptr = phba->hbqslimp.virt;
9350 for (i = 0; i < hbq_count; ++i) {
9351 phba->hbqs[i].hbq_virt = ptr;
9352 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
9353 ptr += (lpfc_hbq_defs[i]->entry_count *
9354 sizeof(struct lpfc_hbq_entry));
9355 }
9356 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
9357 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
9358
9359 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
9360
9361 phba->MBslimaddr = phba->slim_memmap_p;
9362 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
9363 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
9364 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
9365 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
9366
9367 return 0;
9368
9369 out_free_slim:
9370 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9371 phba->slim2p.virt, phba->slim2p.phys);
9372 out_iounmap:
9373 iounmap(phba->ctrl_regs_memmap_p);
9374 out_iounmap_slim:
9375 iounmap(phba->slim_memmap_p);
9376 out:
9377 return error;
9378 }
9379
9380 /**
9381 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
9382 * @phba: pointer to lpfc hba data structure.
9383 *
9384 * This routine is invoked to unset the PCI device memory space for device
9385 * with SLI-3 interface spec.
9386 **/
9387 static void
9388 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
9389 {
9390 struct pci_dev *pdev;
9391
9392 /* Obtain PCI device reference */
9393 if (!phba->pcidev)
9394 return;
9395 else
9396 pdev = phba->pcidev;
9397
9398 /* Free coherent DMA memory allocated */
9399 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
9400 phba->hbqslimp.virt, phba->hbqslimp.phys);
9401 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9402 phba->slim2p.virt, phba->slim2p.phys);
9403
9404 /* I/O memory unmap */
9405 iounmap(phba->ctrl_regs_memmap_p);
9406 iounmap(phba->slim_memmap_p);
9407
9408 return;
9409 }
9410
9411 /**
9412 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
9413 * @phba: pointer to lpfc hba data structure.
9414 *
9415 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
9416 * done and check status.
9417 *
9418 * Return 0 if successful, otherwise -ENODEV.
9419 **/
9420 int
9421 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
9422 {
9423 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
9424 struct lpfc_register reg_data;
9425 int i, port_error = 0;
9426 uint32_t if_type;
9427
9428 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
9429 memset(&reg_data, 0, sizeof(reg_data));
9430 if (!phba->sli4_hba.PSMPHRregaddr)
9431 return -ENODEV;
9432
9433 /* Wait up to 30 seconds for the SLI Port POST done and ready */
9434 for (i = 0; i < 3000; i++) {
9435 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
9436 &portsmphr_reg.word0) ||
9437 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
9438 /* Port has a fatal POST error, break out */
9439 port_error = -ENODEV;
9440 break;
9441 }
9442 if (LPFC_POST_STAGE_PORT_READY ==
9443 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
9444 break;
9445 msleep(10);
9446 }
9447
9448 /*
9449 * If there was a port error during POST, then don't proceed with
9450 * other register reads as the data may not be valid. Just exit.
9451 */
9452 if (port_error) {
9453 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9454 "1408 Port Failed POST - portsmphr=0x%x, "
9455 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
9456 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
9457 portsmphr_reg.word0,
9458 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
9459 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
9460 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
9461 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
9462 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
9463 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
9464 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
9465 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
9466 } else {
9467 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9468 "2534 Device Info: SLIFamily=0x%x, "
9469 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
9470 "SLIHint_2=0x%x, FT=0x%x\n",
9471 bf_get(lpfc_sli_intf_sli_family,
9472 &phba->sli4_hba.sli_intf),
9473 bf_get(lpfc_sli_intf_slirev,
9474 &phba->sli4_hba.sli_intf),
9475 bf_get(lpfc_sli_intf_if_type,
9476 &phba->sli4_hba.sli_intf),
9477 bf_get(lpfc_sli_intf_sli_hint1,
9478 &phba->sli4_hba.sli_intf),
9479 bf_get(lpfc_sli_intf_sli_hint2,
9480 &phba->sli4_hba.sli_intf),
9481 bf_get(lpfc_sli_intf_func_type,
9482 &phba->sli4_hba.sli_intf));
9483 /*
9484 * Check for other Port errors during the initialization
9485 * process. Fail the load if the port did not come up
9486 * correctly.
9487 */
9488 if_type = bf_get(lpfc_sli_intf_if_type,
9489 &phba->sli4_hba.sli_intf);
9490 switch (if_type) {
9491 case LPFC_SLI_INTF_IF_TYPE_0:
9492 phba->sli4_hba.ue_mask_lo =
9493 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
9494 phba->sli4_hba.ue_mask_hi =
9495 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
9496 uerrlo_reg.word0 =
9497 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
9498 uerrhi_reg.word0 =
9499 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
9500 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
9501 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
9502 lpfc_printf_log(phba, KERN_ERR,
9503 LOG_TRACE_EVENT,
9504 "1422 Unrecoverable Error "
9505 "Detected during POST "
9506 "uerr_lo_reg=0x%x, "
9507 "uerr_hi_reg=0x%x, "
9508 "ue_mask_lo_reg=0x%x, "
9509 "ue_mask_hi_reg=0x%x\n",
9510 uerrlo_reg.word0,
9511 uerrhi_reg.word0,
9512 phba->sli4_hba.ue_mask_lo,
9513 phba->sli4_hba.ue_mask_hi);
9514 port_error = -ENODEV;
9515 }
9516 break;
9517 case LPFC_SLI_INTF_IF_TYPE_2:
9518 case LPFC_SLI_INTF_IF_TYPE_6:
9519 /* Final checks. The port status should be clean. */
9520 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
9521 &reg_data.word0) ||
9522 lpfc_sli4_unrecoverable_port(&reg_data)) {
9523 phba->work_status[0] =
9524 readl(phba->sli4_hba.u.if_type2.
9525 ERR1regaddr);
9526 phba->work_status[1] =
9527 readl(phba->sli4_hba.u.if_type2.
9528 ERR2regaddr);
9529 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9530 "2888 Unrecoverable port error "
9531 "following POST: port status reg "
9532 "0x%x, port_smphr reg 0x%x, "
9533 "error 1=0x%x, error 2=0x%x\n",
9534 reg_data.word0,
9535 portsmphr_reg.word0,
9536 phba->work_status[0],
9537 phba->work_status[1]);
9538 port_error = -ENODEV;
9539 break;
9540 }
9541
9542 if (lpfc_pldv_detect &&
9543 bf_get(lpfc_sli_intf_sli_family,
9544 &phba->sli4_hba.sli_intf) ==
9545 LPFC_SLI_INTF_FAMILY_G6)
9546 pci_write_config_byte(phba->pcidev,
9547 LPFC_SLI_INTF, CFG_PLD);
9548 break;
9549 case LPFC_SLI_INTF_IF_TYPE_1:
9550 default:
9551 break;
9552 }
9553 }
9554 return port_error;
9555 }
9556
9557 /**
9558 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
9559 * @phba: pointer to lpfc hba data structure.
9560 * @if_type: The SLI4 interface type getting configured.
9561 *
9562 * This routine is invoked to set up SLI4 BAR0 PCI config space register
9563 * memory map.
9564 **/
9565 static void
9566 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9567 {
9568 switch (if_type) {
9569 case LPFC_SLI_INTF_IF_TYPE_0:
9570 phba->sli4_hba.u.if_type0.UERRLOregaddr =
9571 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
9572 phba->sli4_hba.u.if_type0.UERRHIregaddr =
9573 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
9574 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
9575 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
9576 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
9577 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
9578 phba->sli4_hba.SLIINTFregaddr =
9579 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9580 break;
9581 case LPFC_SLI_INTF_IF_TYPE_2:
9582 phba->sli4_hba.u.if_type2.EQDregaddr =
9583 phba->sli4_hba.conf_regs_memmap_p +
9584 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9585 phba->sli4_hba.u.if_type2.ERR1regaddr =
9586 phba->sli4_hba.conf_regs_memmap_p +
9587 LPFC_CTL_PORT_ER1_OFFSET;
9588 phba->sli4_hba.u.if_type2.ERR2regaddr =
9589 phba->sli4_hba.conf_regs_memmap_p +
9590 LPFC_CTL_PORT_ER2_OFFSET;
9591 phba->sli4_hba.u.if_type2.CTRLregaddr =
9592 phba->sli4_hba.conf_regs_memmap_p +
9593 LPFC_CTL_PORT_CTL_OFFSET;
9594 phba->sli4_hba.u.if_type2.STATUSregaddr =
9595 phba->sli4_hba.conf_regs_memmap_p +
9596 LPFC_CTL_PORT_STA_OFFSET;
9597 phba->sli4_hba.SLIINTFregaddr =
9598 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9599 phba->sli4_hba.PSMPHRregaddr =
9600 phba->sli4_hba.conf_regs_memmap_p +
9601 LPFC_CTL_PORT_SEM_OFFSET;
9602 phba->sli4_hba.RQDBregaddr =
9603 phba->sli4_hba.conf_regs_memmap_p +
9604 LPFC_ULP0_RQ_DOORBELL;
9605 phba->sli4_hba.WQDBregaddr =
9606 phba->sli4_hba.conf_regs_memmap_p +
9607 LPFC_ULP0_WQ_DOORBELL;
9608 phba->sli4_hba.CQDBregaddr =
9609 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
9610 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9611 phba->sli4_hba.MQDBregaddr =
9612 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
9613 phba->sli4_hba.BMBXregaddr =
9614 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9615 break;
9616 case LPFC_SLI_INTF_IF_TYPE_6:
9617 phba->sli4_hba.u.if_type2.EQDregaddr =
9618 phba->sli4_hba.conf_regs_memmap_p +
9619 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9620 phba->sli4_hba.u.if_type2.ERR1regaddr =
9621 phba->sli4_hba.conf_regs_memmap_p +
9622 LPFC_CTL_PORT_ER1_OFFSET;
9623 phba->sli4_hba.u.if_type2.ERR2regaddr =
9624 phba->sli4_hba.conf_regs_memmap_p +
9625 LPFC_CTL_PORT_ER2_OFFSET;
9626 phba->sli4_hba.u.if_type2.CTRLregaddr =
9627 phba->sli4_hba.conf_regs_memmap_p +
9628 LPFC_CTL_PORT_CTL_OFFSET;
9629 phba->sli4_hba.u.if_type2.STATUSregaddr =
9630 phba->sli4_hba.conf_regs_memmap_p +
9631 LPFC_CTL_PORT_STA_OFFSET;
9632 phba->sli4_hba.PSMPHRregaddr =
9633 phba->sli4_hba.conf_regs_memmap_p +
9634 LPFC_CTL_PORT_SEM_OFFSET;
9635 phba->sli4_hba.BMBXregaddr =
9636 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9637 break;
9638 case LPFC_SLI_INTF_IF_TYPE_1:
9639 default:
9640 dev_printk(KERN_ERR, &phba->pcidev->dev,
9641 "FATAL - unsupported SLI4 interface type - %d\n",
9642 if_type);
9643 break;
9644 }
9645 }
9646
9647 /**
9648 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
9649 * @phba: pointer to lpfc hba data structure.
9650 * @if_type: sli if type to operate on.
9651 *
9652 * This routine is invoked to set up SLI4 BAR1 register memory map.
9653 **/
9654 static void
9655 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9656 {
9657 switch (if_type) {
9658 case LPFC_SLI_INTF_IF_TYPE_0:
9659 phba->sli4_hba.PSMPHRregaddr =
9660 phba->sli4_hba.ctrl_regs_memmap_p +
9661 LPFC_SLIPORT_IF0_SMPHR;
9662 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9663 LPFC_HST_ISR0;
9664 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9665 LPFC_HST_IMR0;
9666 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9667 LPFC_HST_ISCR0;
9668 break;
9669 case LPFC_SLI_INTF_IF_TYPE_6:
9670 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9671 LPFC_IF6_RQ_DOORBELL;
9672 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9673 LPFC_IF6_WQ_DOORBELL;
9674 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9675 LPFC_IF6_CQ_DOORBELL;
9676 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9677 LPFC_IF6_EQ_DOORBELL;
9678 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9679 LPFC_IF6_MQ_DOORBELL;
9680 break;
9681 case LPFC_SLI_INTF_IF_TYPE_2:
9682 case LPFC_SLI_INTF_IF_TYPE_1:
9683 default:
9684 dev_err(&phba->pcidev->dev,
9685 "FATAL - unsupported SLI4 interface type - %d\n",
9686 if_type);
9687 break;
9688 }
9689 }
9690
9691 /**
9692 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
9693 * @phba: pointer to lpfc hba data structure.
9694 * @vf: virtual function number
9695 *
9696 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
9697 * based on the given virtual function number, @vf.
9698 *
9699 * Return 0 if successful, otherwise -ENODEV.
9700 **/
9701 static int
9702 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
9703 {
9704 if (vf > LPFC_VIR_FUNC_MAX)
9705 return -ENODEV;
9706
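/* Layout sketch: BAR2 is carved into one doorbell register page per
 * virtual function, so for vf = 2 the RQ doorbell lands at
 * drbl_regs_memmap_p + 2 * LPFC_VFR_PAGE_SIZE + LPFC_ULP0_RQ_DOORBELL,
 * mirroring the computations below.
 */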
9707 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9708 vf * LPFC_VFR_PAGE_SIZE +
9709 LPFC_ULP0_RQ_DOORBELL);
9710 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9711 vf * LPFC_VFR_PAGE_SIZE +
9712 LPFC_ULP0_WQ_DOORBELL);
9713 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9714 vf * LPFC_VFR_PAGE_SIZE +
9715 LPFC_EQCQ_DOORBELL);
9716 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9717 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9718 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
9719 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9720 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
9721 return 0;
9722 }
9723
9724 /**
9725 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
9726 * @phba: pointer to lpfc hba data structure.
9727 *
9728 * This routine is invoked to create the bootstrap mailbox
9729 * region consistent with the SLI-4 interface spec. This
9730 * routine allocates all memory necessary to communicate
9731 * mailbox commands to the port and sets up all alignment
9732 * needs. No locks are expected to be held when calling
9733 * this routine.
9734 *
9735 * Return codes
9736 * 0 - successful
9737 * -ENOMEM - could not allocated memory.
9738 **/
9739 static int
9740 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
9741 {
9742 uint32_t bmbx_size;
9743 struct lpfc_dmabuf *dmabuf;
9744 struct dma_address *dma_address;
9745 uint32_t pa_addr;
9746 uint64_t phys_addr;
9747
9748 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
9749 if (!dmabuf)
9750 return -ENOMEM;
9751
9752 /*
9753 * The bootstrap mailbox region consists of 2 parts
9754 * plus an alignment restriction of 16 bytes.
9755 */
9756 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
9757 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
9758 &dmabuf->phys, GFP_KERNEL);
9759 if (!dmabuf->virt) {
9760 kfree(dmabuf);
9761 return -ENOMEM;
9762 }
9763
9764 /*
9765 * Initialize the bootstrap mailbox pointers now so that the register
9766 * operations are simple later. The mailbox dma address is required
9767 * to be 16-byte aligned. Also align the virtual memory as each
9768 * mailbox is copied into the bmbx mailbox region before issuing the
9769 * command to the port.
9770 */
9771 phba->sli4_hba.bmbx.dmabuf = dmabuf;
9772 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
9773
9774 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
9775 LPFC_ALIGN_16_BYTE);
9776 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
9777 LPFC_ALIGN_16_BYTE);
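/* Worked example (illustrative): coherent allocations are normally page
 * aligned, but if dma_alloc_coherent() ever returned an address ending
 * in 0x8, PTR_ALIGN()/ALIGN() would round it up to the next 0x10
 * boundary, consuming at most LPFC_ALIGN_16_BYTE - 1 of the extra
 * bytes allocated above.
 */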
9778
9779 /*
9780 * Set the high and low physical addresses now. The SLI4 alignment
9781 * requirement is 16 bytes and the mailbox is posted to the port
9782 * as two 30-bit addresses. The other data is a bit marking whether
9783 * the 30-bit address is the high or low address.
9784 * Upcast bmbx aphys to 64bits so shift instruction compiles
9785 * clean on 32 bit machines.
9786 */
9787 dma_address = &phba->sli4_hba.bmbx.dma_address;
9788 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
9789 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
9790 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
9791 LPFC_BMBX_BIT1_ADDR_HI);
9792
9793 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
9794 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
9795 LPFC_BMBX_BIT1_ADDR_LO);
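/* Packing sketch: for the 16-byte aligned aphys, bits 63:34 (shifted
 * left by 2 and tagged with the HI marker bit) end up in addr_hi and
 * bits 33:4 (shifted left by 2 and tagged with the LO marker bit) end
 * up in addr_lo; the port reassembles the full bootstrap mailbox
 * address from the two 30-bit halves.
 */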
9796 return 0;
9797 }
9798
9799 /**
9800 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
9801 * @phba: pointer to lpfc hba data structure.
9802 *
9803 * This routine is invoked to teardown the bootstrap mailbox
9804 * region and release all host resources. This routine requires
9805 * the caller to ensure all mailbox commands have been recovered, no
9806 * additional mailbox commands are sent, and interrupts are disabled
9807 * before calling this routine.
9808 *
9809 **/
9810 static void
9811 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
9812 {
9813 dma_free_coherent(&phba->pcidev->dev,
9814 phba->sli4_hba.bmbx.bmbx_size,
9815 phba->sli4_hba.bmbx.dmabuf->virt,
9816 phba->sli4_hba.bmbx.dmabuf->phys);
9817
9818 kfree(phba->sli4_hba.bmbx.dmabuf);
9819 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
9820 }
9821
9822 static const char * const lpfc_topo_to_str[] = {
9823 "Loop then P2P",
9824 "Loopback",
9825 "P2P Only",
9826 "Unsupported",
9827 "Loop Only",
9828 "Unsupported",
9829 "P2P then Loop",
9830 };
9831
9832 #define LINK_FLAGS_DEF 0x0
9833 #define LINK_FLAGS_P2P 0x1
9834 #define LINK_FLAGS_LOOP 0x2
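/* Decode summary, derived from lpfc_map_topology() below, for a valid
 * persistent topology value (ptv = 1):
 *
 *   G6 and later: tf = 0, pt = LOOP -> FLAGS_TOPOLOGY_MODE_LOOP
 *                 tf = 0, otherwise -> FLAGS_TOPOLOGY_MODE_PT_PT
 *                 tf = 1            -> persistent value rejected
 *   G5:           tf = 1, pt = 1    -> FLAGS_TOPOLOGY_MODE_PT_LOOP
 *                 tf = 1, pt = 0    -> FLAGS_TOPOLOGY_MODE_LOOP_PT
 *                 tf = 0, pt = P2P  -> FLAGS_TOPOLOGY_MODE_PT_PT
 *                 tf = 0, otherwise -> FLAGS_TOPOLOGY_MODE_LOOP
 */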
9835 /**
9836 * lpfc_map_topology - Map the topology read from READ_CONFIG
9837 * @phba: pointer to lpfc hba data structure.
9838 * @rd_config: pointer to read config data
9839 *
9840 * This routine is invoked to map the topology values as read
9841 * from the read config mailbox command. If the persistent
9842 * topology feature is supported, the firmware will provide the
9843 * saved topology information to be used in INIT_LINK
9844 **/
9845 static void
9846 lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
9847 {
9848 u8 ptv, tf, pt;
9849
9850 ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
9851 tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
9852 pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
9853
9854 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9855 "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
9856 ptv, tf, pt);
9857 if (!ptv) {
9858 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9859 "2019 FW does not support persistent topology "
9860 "Using driver parameter defined value [%s]",
9861 lpfc_topo_to_str[phba->cfg_topology]);
9862 return;
9863 }
9864 /* FW supports persistent topology - override module parameter value */
9865 phba->hba_flag |= HBA_PERSISTENT_TOPO;
9866
9867 /* if ASIC_GEN_NUM >= 0xC) */
9868 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
9869 LPFC_SLI_INTF_IF_TYPE_6) ||
9870 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
9871 LPFC_SLI_INTF_FAMILY_G6)) {
9872 if (!tf) {
9873 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
9874 ? FLAGS_TOPOLOGY_MODE_LOOP
9875 : FLAGS_TOPOLOGY_MODE_PT_PT);
9876 } else {
9877 phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
9878 }
9879 } else { /* G5 */
9880 if (tf) {
9881 /* If topology failover set - pt is '0' or '1' */
9882 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
9883 FLAGS_TOPOLOGY_MODE_LOOP_PT);
9884 } else {
9885 phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
9886 ? FLAGS_TOPOLOGY_MODE_PT_PT
9887 : FLAGS_TOPOLOGY_MODE_LOOP);
9888 }
9889 }
9890 if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
9891 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9892 "2020 Using persistent topology value [%s]",
9893 lpfc_topo_to_str[phba->cfg_topology]);
9894 } else {
9895 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9896 "2021 Invalid topology values from FW "
9897 "Using driver parameter defined value [%s]",
9898 lpfc_topo_to_str[phba->cfg_topology]);
9899 }
9900 }
9901
9902 /**
9903 * lpfc_sli4_read_config - Get the config parameters.
9904 * @phba: pointer to lpfc hba data structure.
9905 *
9906 * This routine is invoked to read the configuration parameters from the HBA.
9907 * The configuration parameters are used to set the base and maximum values
9908 * for RPIs, XRIs, VPIs, VFIs and FCFIs. These values also affect the resource
9909 * allocation for the port.
9910 *
9911 * Return codes
9912 * 0 - successful
9913 * -ENOMEM - No available memory
9914 * -EIO - The mailbox failed to complete successfully.
9915 **/
9916 int
9917 lpfc_sli4_read_config(struct lpfc_hba *phba)
9918 {
9919 LPFC_MBOXQ_t *pmb;
9920 struct lpfc_mbx_read_config *rd_config;
9921 union lpfc_sli4_cfg_shdr *shdr;
9922 uint32_t shdr_status, shdr_add_status;
9923 struct lpfc_mbx_get_func_cfg *get_func_cfg;
9924 struct lpfc_rsrc_desc_fcfcoe *desc;
9925 char *pdesc_0;
9926 uint16_t forced_link_speed;
9927 uint32_t if_type, qmin, fawwpn;
9928 int length, i, rc = 0, rc2;
9929
9930 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9931 if (!pmb) {
9932 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9933 "2011 Unable to allocate memory for issuing "
9934 "SLI_CONFIG_SPECIAL mailbox command\n");
9935 return -ENOMEM;
9936 }
9937
9938 lpfc_read_config(phba, pmb);
9939
9940 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
9941 if (rc != MBX_SUCCESS) {
9942 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9943 "2012 Mailbox failed , mbxCmd x%x "
9944 "READ_CONFIG, mbxStatus x%x\n",
9945 bf_get(lpfc_mqe_command, &pmb->u.mqe),
9946 bf_get(lpfc_mqe_status, &pmb->u.mqe));
9947 rc = -EIO;
9948 } else {
9949 rd_config = &pmb->u.mqe.un.rd_config;
9950 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
9951 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
9952 phba->sli4_hba.lnk_info.lnk_tp =
9953 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
9954 phba->sli4_hba.lnk_info.lnk_no =
9955 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
9956 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9957 "3081 lnk_type:%d, lnk_numb:%d\n",
9958 phba->sli4_hba.lnk_info.lnk_tp,
9959 phba->sli4_hba.lnk_info.lnk_no);
9960 } else
9961 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9962 "3082 Mailbox (x%x) returned ldv:x0\n",
9963 bf_get(lpfc_mqe_command, &pmb->u.mqe));
9964 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
9965 phba->bbcredit_support = 1;
9966 phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
9967 }
9968
9969 fawwpn = bf_get(lpfc_mbx_rd_conf_fawwpn, rd_config);
9970
9971 if (fawwpn) {
9972 lpfc_printf_log(phba, KERN_INFO,
9973 LOG_INIT | LOG_DISCOVERY,
9974 "2702 READ_CONFIG: FA-PWWN is "
9975 "configured on\n");
9976 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG;
9977 } else {
9978 /* Clear FW configured flag, preserve driver flag */
9979 phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_CONFIG;
9980 }
9981
9982 phba->sli4_hba.conf_trunk =
9983 bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
9984 phba->sli4_hba.extents_in_use =
9985 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
9986
9987 phba->sli4_hba.max_cfg_param.max_xri =
9988 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
9989 /* Reduce resource usage in kdump environment */
9990 if (is_kdump_kernel() &&
9991 phba->sli4_hba.max_cfg_param.max_xri > 512)
9992 phba->sli4_hba.max_cfg_param.max_xri = 512;
9993 phba->sli4_hba.max_cfg_param.xri_base =
9994 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
9995 phba->sli4_hba.max_cfg_param.max_vpi =
9996 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
9997 /* Limit the max we support */
9998 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
9999 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
10000 phba->sli4_hba.max_cfg_param.vpi_base =
10001 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
10002 phba->sli4_hba.max_cfg_param.max_rpi =
10003 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
10004 phba->sli4_hba.max_cfg_param.rpi_base =
10005 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
10006 phba->sli4_hba.max_cfg_param.max_vfi =
10007 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
10008 phba->sli4_hba.max_cfg_param.vfi_base =
10009 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
10010 phba->sli4_hba.max_cfg_param.max_fcfi =
10011 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
10012 phba->sli4_hba.max_cfg_param.max_eq =
10013 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
10014 phba->sli4_hba.max_cfg_param.max_rq =
10015 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
10016 phba->sli4_hba.max_cfg_param.max_wq =
10017 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
10018 phba->sli4_hba.max_cfg_param.max_cq =
10019 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
10020 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
10021 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
10022 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
10023 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
10024 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
10025 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
10026 phba->max_vports = phba->max_vpi;
10027
10028 /* Next decide on FPIN or Signal E2E CGN support
10029 * For congestion alarms and warnings valid combination are:
10030 * 1. FPIN alarms / FPIN warnings
10031 * 2. Signal alarms / Signal warnings
10032 * 3. FPIN alarms / Signal warnings
10033 * 4. Signal alarms / FPIN warnings
10034 *
10035 * Initialize the adapter frequency to 100 mSecs
10036 */
10037 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
10038 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
10039 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
10040
10041 if (lpfc_use_cgn_signal) {
10042 if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) {
10043 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
10044 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
10045 }
10046 if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) {
10047 /* MUST support both alarm and warning
10048 * because EDC does not support alarm alone.
10049 */
10050 if (phba->cgn_reg_signal !=
10051 EDC_CG_SIG_WARN_ONLY) {
10052 /* Must support both or none */
10053 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
10054 phba->cgn_reg_signal =
10055 EDC_CG_SIG_NOTSUPPORTED;
10056 } else {
10057 phba->cgn_reg_signal =
10058 EDC_CG_SIG_WARN_ALARM;
10059 phba->cgn_reg_fpin =
10060 LPFC_CGN_FPIN_NONE;
10061 }
10062 }
10063 }
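/* Example decode (illustrative): with lpfc_use_cgn_signal set and a
 * port reporting wcs = 1, acs = 0, the logic above leaves
 * cgn_reg_signal = EDC_CG_SIG_WARN_ONLY and strips the warning bit
 * from cgn_reg_fpin, i.e. combination 3 above (FPIN alarms with
 * signal warnings).
 */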
10064
10065 /* Set the congestion initial signal and fpin values. */
10066 phba->cgn_init_reg_fpin = phba->cgn_reg_fpin;
10067 phba->cgn_init_reg_signal = phba->cgn_reg_signal;
10068
10069 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
10070 "6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n",
10071 phba->cgn_reg_signal, phba->cgn_reg_fpin);
10072
10073 lpfc_map_topology(phba, rd_config);
10074 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10075 "2003 cfg params Extents? %d "
10076 "XRI(B:%d M:%d), "
10077 "VPI(B:%d M:%d) "
10078 "VFI(B:%d M:%d) "
10079 "RPI(B:%d M:%d) "
10080 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
10081 phba->sli4_hba.extents_in_use,
10082 phba->sli4_hba.max_cfg_param.xri_base,
10083 phba->sli4_hba.max_cfg_param.max_xri,
10084 phba->sli4_hba.max_cfg_param.vpi_base,
10085 phba->sli4_hba.max_cfg_param.max_vpi,
10086 phba->sli4_hba.max_cfg_param.vfi_base,
10087 phba->sli4_hba.max_cfg_param.max_vfi,
10088 phba->sli4_hba.max_cfg_param.rpi_base,
10089 phba->sli4_hba.max_cfg_param.max_rpi,
10090 phba->sli4_hba.max_cfg_param.max_fcfi,
10091 phba->sli4_hba.max_cfg_param.max_eq,
10092 phba->sli4_hba.max_cfg_param.max_cq,
10093 phba->sli4_hba.max_cfg_param.max_wq,
10094 phba->sli4_hba.max_cfg_param.max_rq,
10095 phba->lmt);
10096
10097 /*
10098 * Calculate queue resources based on how
10099 * many WQ/CQ/EQs are available.
10100 */
10101 qmin = phba->sli4_hba.max_cfg_param.max_wq;
10102 if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
10103 qmin = phba->sli4_hba.max_cfg_param.max_cq;
10104 /*
10105 * Reserve 4 (ELS, NVME LS, MBOX, plus one extra) and
10106 * the remainder can be used for NVME / FCP.
10107 */
10108 qmin -= 4;
10109 if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
10110 qmin = phba->sli4_hba.max_cfg_param.max_eq;
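/* Worked example (illustrative numbers): with max_wq = 128,
 * max_cq = 160 and max_eq = 64, qmin = min(128, 160) - 4 = 124 and
 * then min(124, 64) = 64, so cfg_irq_chann and cfg_hdw_queue are
 * each capped at 64 below.
 */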
10111
10112 /* Check to see if there is enough for default cfg */
10113 if ((phba->cfg_irq_chann > qmin) ||
10114 (phba->cfg_hdw_queue > qmin)) {
10115 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10116 "2005 Reducing Queues - "
10117 "FW resource limitation: "
10118 "WQ %d CQ %d EQ %d: min %d: "
10119 "IRQ %d HDWQ %d\n",
10120 phba->sli4_hba.max_cfg_param.max_wq,
10121 phba->sli4_hba.max_cfg_param.max_cq,
10122 phba->sli4_hba.max_cfg_param.max_eq,
10123 qmin, phba->cfg_irq_chann,
10124 phba->cfg_hdw_queue);
10125
10126 if (phba->cfg_irq_chann > qmin)
10127 phba->cfg_irq_chann = qmin;
10128 if (phba->cfg_hdw_queue > qmin)
10129 phba->cfg_hdw_queue = qmin;
10130 }
10131 }
10132
10133 if (rc)
10134 goto read_cfg_out;
10135
10136 /* Update link speed if forced link speed is supported */
10137 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10138 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10139 forced_link_speed =
10140 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
10141 if (forced_link_speed) {
10142 phba->hba_flag |= HBA_FORCED_LINK_SPEED;
10143
10144 switch (forced_link_speed) {
10145 case LINK_SPEED_1G:
10146 phba->cfg_link_speed =
10147 LPFC_USER_LINK_SPEED_1G;
10148 break;
10149 case LINK_SPEED_2G:
10150 phba->cfg_link_speed =
10151 LPFC_USER_LINK_SPEED_2G;
10152 break;
10153 case LINK_SPEED_4G:
10154 phba->cfg_link_speed =
10155 LPFC_USER_LINK_SPEED_4G;
10156 break;
10157 case LINK_SPEED_8G:
10158 phba->cfg_link_speed =
10159 LPFC_USER_LINK_SPEED_8G;
10160 break;
10161 case LINK_SPEED_10G:
10162 phba->cfg_link_speed =
10163 LPFC_USER_LINK_SPEED_10G;
10164 break;
10165 case LINK_SPEED_16G:
10166 phba->cfg_link_speed =
10167 LPFC_USER_LINK_SPEED_16G;
10168 break;
10169 case LINK_SPEED_32G:
10170 phba->cfg_link_speed =
10171 LPFC_USER_LINK_SPEED_32G;
10172 break;
10173 case LINK_SPEED_64G:
10174 phba->cfg_link_speed =
10175 LPFC_USER_LINK_SPEED_64G;
10176 break;
10177 case 0xffff:
10178 phba->cfg_link_speed =
10179 LPFC_USER_LINK_SPEED_AUTO;
10180 break;
10181 default:
10182 lpfc_printf_log(phba, KERN_ERR,
10183 LOG_TRACE_EVENT,
10184 "0047 Unrecognized link "
10185 "speed : %d\n",
10186 forced_link_speed);
10187 phba->cfg_link_speed =
10188 LPFC_USER_LINK_SPEED_AUTO;
10189 }
10190 }
10191 }
10192
10193 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
10194 length = phba->sli4_hba.max_cfg_param.max_xri -
10195 lpfc_sli4_get_els_iocb_cnt(phba);
10196 if (phba->cfg_hba_queue_depth > length) {
10197 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10198 "3361 HBA queue depth changed from %d to %d\n",
10199 phba->cfg_hba_queue_depth, length);
10200 phba->cfg_hba_queue_depth = length;
10201 }
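/*
 * Example (assumed numbers): if max_xri is 2048 and
 * lpfc_sli4_get_els_iocb_cnt() reserves 256 XRIs for ELS traffic,
 * cfg_hba_queue_depth is capped at 2048 - 256 = 1792.
 */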
10202
10203 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
10204 LPFC_SLI_INTF_IF_TYPE_2)
10205 goto read_cfg_out;
10206
10207 /* get the pf# and vf# for SLI4 if_type 2 port */
10208 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
10209 sizeof(struct lpfc_sli4_cfg_mhdr));
10210 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
10211 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
10212 length, LPFC_SLI4_MBX_EMBED);
10213
10214 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
10215 shdr = (union lpfc_sli4_cfg_shdr *)
10216 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
10217 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10218 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10219 if (rc2 || shdr_status || shdr_add_status) {
10220 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10221 "3026 Mailbox failed , mbxCmd x%x "
10222 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
10223 bf_get(lpfc_mqe_command, &pmb->u.mqe),
10224 bf_get(lpfc_mqe_status, &pmb->u.mqe));
10225 goto read_cfg_out;
10226 }
10227
10228 /* Search for the fc_fcoe resource descriptor */
10229 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
10230
10231 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
10232 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
10233 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
10234 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
10235 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
10236 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
10237 goto read_cfg_out;
10238
10239 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
10240 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
10241 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
10242 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
10243 phba->sli4_hba.iov.pf_number =
10244 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
10245 phba->sli4_hba.iov.vf_number =
10246 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
10247 break;
10248 }
10249 }
10250
10251 if (i < LPFC_RSRC_DESC_MAX_NUM)
10252 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10253 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
10254 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
10255 phba->sli4_hba.iov.vf_number);
10256 else
10257 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10258 "3028 GET_FUNCTION_CONFIG: failed to find "
10259 "Resource Descriptor:x%x\n",
10260 LPFC_RSRC_DESC_TYPE_FCFCOE);
10261
10262 read_cfg_out:
10263 mempool_free(pmb, phba->mbox_mem_pool);
10264 return rc;
10265 }
10266
10267 /**
10268 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
10269 * @phba: pointer to lpfc hba data structure.
10270 *
10271 * This routine is invoked to setup the port-side endian order when
10272 * the port if_type is 0. This routine has no function for other
10273 * if_types.
10274 *
10275 * Return codes
10276 * 0 - successful
10277 * -ENOMEM - No available memory
10278 * -EIO - The mailbox failed to complete successfully.
10279 **/
10280 static int
10281 lpfc_setup_endian_order(struct lpfc_hba *phba)
10282 {
10283 LPFC_MBOXQ_t *mboxq;
10284 uint32_t if_type, rc = 0;
10285 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
10286 HOST_ENDIAN_HIGH_WORD1};
10287
10288 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10289 switch (if_type) {
10290 case LPFC_SLI_INTF_IF_TYPE_0:
10291 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10292 GFP_KERNEL);
10293 if (!mboxq) {
10294 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10295 "0492 Unable to allocate memory for "
10296 "issuing SLI_CONFIG_SPECIAL mailbox "
10297 "command\n");
10298 return -ENOMEM;
10299 }
10300
10301 /*
10302 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
10303 * two words to contain special data values and no other data.
10304 */
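/*
 * Illustration (assumed port behavior): the two magic words are
 * byte-order asymmetric, so the port can infer from the byte pattern
 * it receives whether the host wrote them little- or big-endian, and
 * configure its own swapping to match.
 */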
10305 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
10306 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
10307 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10308 if (rc != MBX_SUCCESS) {
10309 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10310 "0493 SLI_CONFIG_SPECIAL mailbox "
10311 "failed with status x%x\n",
10312 rc);
10313 rc = -EIO;
10314 }
10315 mempool_free(mboxq, phba->mbox_mem_pool);
10316 break;
10317 case LPFC_SLI_INTF_IF_TYPE_6:
10318 case LPFC_SLI_INTF_IF_TYPE_2:
10319 case LPFC_SLI_INTF_IF_TYPE_1:
10320 default:
10321 break;
10322 }
10323 return rc;
10324 }
10325
10326 /**
10327 * lpfc_sli4_queue_verify - Verify and update EQ counts
10328 * @phba: pointer to lpfc hba data structure.
10329 *
10330 * This routine is invoked to check the user settable queue counts for EQs.
10331 * After this routine is called the counts will be set to valid values that
10332 * adhere to the constraints of the system's interrupt vectors and the port's
10333 * queue resources.
10334 *
10335 * Return codes
10336 * 0 - successful
10338 **/
10339 static int
10340 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
10341 {
10342 /*
10343 * Sanity check for configured queue parameters against the run-time
10344 * device parameters
10345 */
10346
10347 if (phba->nvmet_support) {
10348 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
10349 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
10350 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
10351 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
10352 }
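/*
 * Example: with cfg_hdw_queue = 8 and cfg_nvmet_mrq = 16, the MRQ
 * count is first capped to 8 and then bounded by LPFC_NVMET_MRQ_MAX.
 */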
10353
10354 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10355 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
10356 phba->cfg_hdw_queue, phba->cfg_irq_chann,
10357 phba->cfg_nvmet_mrq);
10358
10359 /* Get EQ depth from module parameter, fake the default for now */
10360 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10361 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10362
10363 /* Get CQ depth from module parameter, fake the default for now */
10364 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10365 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10366 return 0;
10367 }
10368
10369 static int
10370 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
10371 {
10372 struct lpfc_queue *qdesc;
10373 u32 wqesize;
10374 int cpu;
10375
10376 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
10377 /* Create Fast Path IO CQs */
10378 if (phba->enab_exp_wqcq_pages)
10379 /* Increase the CQ size when WQEs contain an embedded cdb */
10380 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10381 phba->sli4_hba.cq_esize,
10382 LPFC_CQE_EXP_COUNT, cpu);
10383
10384 else
10385 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10386 phba->sli4_hba.cq_esize,
10387 phba->sli4_hba.cq_ecount, cpu);
10388 if (!qdesc) {
10389 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10390 "0499 Failed allocate fast-path IO CQ (%d)\n",
10391 idx);
10392 return 1;
10393 }
10394 qdesc->qe_valid = 1;
10395 qdesc->hdwq = idx;
10396 qdesc->chann = cpu;
10397 phba->sli4_hba.hdwq[idx].io_cq = qdesc;
10398
10399 /* Create Fast Path IO WQs */
10400 if (phba->enab_exp_wqcq_pages) {
10401 /* Increase the WQ size when WQEs contain an embedded cdb */
10402 wqesize = (phba->fcp_embed_io) ?
10403 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
10404 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10405 wqesize,
10406 LPFC_WQE_EXP_COUNT, cpu);
10407 } else
10408 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10409 phba->sli4_hba.wq_esize,
10410 phba->sli4_hba.wq_ecount, cpu);
10411
10412 if (!qdesc) {
10413 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10414 "0503 Failed allocate fast-path IO WQ (%d)\n",
10415 idx);
10416 return 1;
10417 }
10418 qdesc->hdwq = idx;
10419 qdesc->chann = cpu;
10420 phba->sli4_hba.hdwq[idx].io_wq = qdesc;
10421 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10422 return 0;
10423 }
10424
10425 /**
10426 * lpfc_sli4_queue_create - Create all the SLI4 queues
10427 * @phba: pointer to lpfc hba data structure.
10428 *
10429 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
10430 * operation. For each SLI4 queue type, the parameters such as queue entry
10431 * count (queue depth) shall be taken from the module parameter. For now,
10432 * we just use some constant number as a placeholder.
10433 *
10434 * Return codes
10435 * 0 - successful
10436 * -ENOMEM - No available memory
10438 **/
10439 int
10440 lpfc_sli4_queue_create(struct lpfc_hba *phba)
10441 {
10442 struct lpfc_queue *qdesc;
10443 int idx, cpu, eqcpu;
10444 struct lpfc_sli4_hdw_queue *qp;
10445 struct lpfc_vector_map_info *cpup;
10446 struct lpfc_vector_map_info *eqcpup;
10447 struct lpfc_eq_intr_info *eqi;
10448
10449 /*
10450 * Create HBA Record arrays.
10451 * Both NVME and FCP will share that same vectors / EQs
10452 */
10453 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
10454 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
10455 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
10456 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
10457 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
10458 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
10459 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10460 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10461 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10462 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10463
10464 if (!phba->sli4_hba.hdwq) {
10465 phba->sli4_hba.hdwq = kcalloc(
10466 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
10467 GFP_KERNEL);
10468 if (!phba->sli4_hba.hdwq) {
10469 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10470 "6427 Failed allocate memory for "
10471 "fast-path Hardware Queue array\n");
10472 goto out_error;
10473 }
10474 /* Prepare hardware queues to take IO buffers */
10475 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10476 qp = &phba->sli4_hba.hdwq[idx];
10477 spin_lock_init(&qp->io_buf_list_get_lock);
10478 spin_lock_init(&qp->io_buf_list_put_lock);
10479 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
10480 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
10481 qp->get_io_bufs = 0;
10482 qp->put_io_bufs = 0;
10483 qp->total_io_bufs = 0;
10484 spin_lock_init(&qp->abts_io_buf_list_lock);
10485 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
10486 qp->abts_scsi_io_bufs = 0;
10487 qp->abts_nvme_io_bufs = 0;
10488 INIT_LIST_HEAD(&qp->sgl_list);
10489 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
10490 spin_lock_init(&qp->hdwq_lock);
10491 }
10492 }
10493
10494 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10495 if (phba->nvmet_support) {
10496 phba->sli4_hba.nvmet_cqset = kcalloc(
10497 phba->cfg_nvmet_mrq,
10498 sizeof(struct lpfc_queue *),
10499 GFP_KERNEL);
10500 if (!phba->sli4_hba.nvmet_cqset) {
10501 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10502 "3121 Fail allocate memory for "
10503 "fast-path CQ set array\n");
10504 goto out_error;
10505 }
10506 phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
10507 phba->cfg_nvmet_mrq,
10508 sizeof(struct lpfc_queue *),
10509 GFP_KERNEL);
10510 if (!phba->sli4_hba.nvmet_mrq_hdr) {
10511 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10512 "3122 Fail allocate memory for "
10513 "fast-path RQ set hdr array\n");
10514 goto out_error;
10515 }
10516 phba->sli4_hba.nvmet_mrq_data = kcalloc(
10517 phba->cfg_nvmet_mrq,
10518 sizeof(struct lpfc_queue *),
10519 GFP_KERNEL);
10520 if (!phba->sli4_hba.nvmet_mrq_data) {
10521 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10522 "3124 Fail allocate memory for "
10523 "fast-path RQ set data array\n");
10524 goto out_error;
10525 }
10526 }
10527 }
10528
10529 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10530
10531 /* Create HBA Event Queues (EQs) */
10532 for_each_present_cpu(cpu) {
10533 /* We only want to create 1 EQ per vector, even though
10534 * multiple CPUs might be using that vector, so only
10535 * select the CPUs that are flagged LPFC_CPU_FIRST_IRQ.
10536 */
10537 cpup = &phba->sli4_hba.cpu_map[cpu];
10538 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
10539 continue;
10540
10541 /* Get a ptr to the Hardware Queue associated with this CPU */
10542 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10543
10544 /* Allocate an EQ */
10545 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10546 phba->sli4_hba.eq_esize,
10547 phba->sli4_hba.eq_ecount, cpu);
10548 if (!qdesc) {
10549 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10550 "0497 Failed allocate EQ (%d)\n",
10551 cpup->hdwq);
10552 goto out_error;
10553 }
10554 qdesc->qe_valid = 1;
10555 qdesc->hdwq = cpup->hdwq;
10556 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
10557 qdesc->last_cpu = qdesc->chann;
10558
10559 /* Save the allocated EQ in the Hardware Queue */
10560 qp->hba_eq = qdesc;
10561
10562 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
10563 list_add(&qdesc->cpu_list, &eqi->list);
10564 }
10565
10566 /* Now we need to populate the other Hardware Queues, that share
10567 * an IRQ vector, with the associated EQ ptr.
10568 */
10569 for_each_present_cpu(cpu) {
10570 cpup = &phba->sli4_hba.cpu_map[cpu];
10571
10572 /* Check for EQ already allocated in previous loop */
10573 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
10574 continue;
10575
10576 /* Check for multiple CPUs per hdwq */
10577 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10578 if (qp->hba_eq)
10579 continue;
10580
10581 /* We need to share an EQ for this hdwq */
10582 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
10583 eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
10584 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
10585 }
10586
10587 /* Allocate IO Path SLI4 CQ/WQs */
10588 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10589 if (lpfc_alloc_io_wq_cq(phba, idx))
10590 goto out_error;
10591 }
10592
10593 if (phba->nvmet_support) {
10594 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10595 cpu = lpfc_find_cpu_handle(phba, idx,
10596 LPFC_FIND_BY_HDWQ);
10597 qdesc = lpfc_sli4_queue_alloc(phba,
10598 LPFC_DEFAULT_PAGE_SIZE,
10599 phba->sli4_hba.cq_esize,
10600 phba->sli4_hba.cq_ecount,
10601 cpu);
10602 if (!qdesc) {
10603 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10604 "3142 Failed allocate NVME "
10605 "CQ Set (%d)\n", idx);
10606 goto out_error;
10607 }
10608 qdesc->qe_valid = 1;
10609 qdesc->hdwq = idx;
10610 qdesc->chann = cpu;
10611 phba->sli4_hba.nvmet_cqset[idx] = qdesc;
10612 }
10613 }
10614
10615 /*
10616 * Create Slow Path Completion Queues (CQs)
10617 */
10618
10619 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
10620 /* Create slow-path Mailbox Command Complete Queue */
10621 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10622 phba->sli4_hba.cq_esize,
10623 phba->sli4_hba.cq_ecount, cpu);
10624 if (!qdesc) {
10625 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10626 "0500 Failed allocate slow-path mailbox CQ\n");
10627 goto out_error;
10628 }
10629 qdesc->qe_valid = 1;
10630 phba->sli4_hba.mbx_cq = qdesc;
10631
10632 /* Create slow-path ELS Complete Queue */
10633 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10634 phba->sli4_hba.cq_esize,
10635 phba->sli4_hba.cq_ecount, cpu);
10636 if (!qdesc) {
10637 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10638 "0501 Failed allocate slow-path ELS CQ\n");
10639 goto out_error;
10640 }
10641 qdesc->qe_valid = 1;
10642 qdesc->chann = cpu;
10643 phba->sli4_hba.els_cq = qdesc;
10644
10645
10646 /*
10647 * Create Slow Path Work Queues (WQs)
10648 */
10649
10650 /* Create Mailbox Command Queue */
10651
10652 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10653 phba->sli4_hba.mq_esize,
10654 phba->sli4_hba.mq_ecount, cpu);
10655 if (!qdesc) {
10656 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10657 "0505 Failed allocate slow-path MQ\n");
10658 goto out_error;
10659 }
10660 qdesc->chann = cpu;
10661 phba->sli4_hba.mbx_wq = qdesc;
10662
10663 /*
10664 * Create ELS Work Queues
10665 */
10666
10667 /* Create slow-path ELS Work Queue */
10668 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10669 phba->sli4_hba.wq_esize,
10670 phba->sli4_hba.wq_ecount, cpu);
10671 if (!qdesc) {
10672 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10673 "0504 Failed allocate slow-path ELS WQ\n");
10674 goto out_error;
10675 }
10676 qdesc->chann = cpu;
10677 phba->sli4_hba.els_wq = qdesc;
10678 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10679
10680 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10681 /* Create NVME LS Complete Queue */
10682 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10683 phba->sli4_hba.cq_esize,
10684 phba->sli4_hba.cq_ecount, cpu);
10685 if (!qdesc) {
10686 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10687 "6079 Failed allocate NVME LS CQ\n");
10688 goto out_error;
10689 }
10690 qdesc->chann = cpu;
10691 qdesc->qe_valid = 1;
10692 phba->sli4_hba.nvmels_cq = qdesc;
10693
10694 /* Create NVME LS Work Queue */
10695 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10696 phba->sli4_hba.wq_esize,
10697 phba->sli4_hba.wq_ecount, cpu);
10698 if (!qdesc) {
10699 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10700 "6080 Failed allocate NVME LS WQ\n");
10701 goto out_error;
10702 }
10703 qdesc->chann = cpu;
10704 phba->sli4_hba.nvmels_wq = qdesc;
10705 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10706 }
10707
10708 /*
10709 * Create Receive Queue (RQ)
10710 */
10711
10712 /* Create Receive Queue for header */
10713 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10714 phba->sli4_hba.rq_esize,
10715 phba->sli4_hba.rq_ecount, cpu);
10716 if (!qdesc) {
10717 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10718 "0506 Failed allocate receive HRQ\n");
10719 goto out_error;
10720 }
10721 phba->sli4_hba.hdr_rq = qdesc;
10722
10723 /* Create Receive Queue for data */
10724 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10725 phba->sli4_hba.rq_esize,
10726 phba->sli4_hba.rq_ecount, cpu);
10727 if (!qdesc) {
10728 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10729 "0507 Failed allocate receive DRQ\n");
10730 goto out_error;
10731 }
10732 phba->sli4_hba.dat_rq = qdesc;
10733
10734 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
10735 phba->nvmet_support) {
10736 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10737 cpu = lpfc_find_cpu_handle(phba, idx,
10738 LPFC_FIND_BY_HDWQ);
10739 /* Create NVMET Receive Queue for header */
10740 qdesc = lpfc_sli4_queue_alloc(phba,
10741 LPFC_DEFAULT_PAGE_SIZE,
10742 phba->sli4_hba.rq_esize,
10743 LPFC_NVMET_RQE_DEF_COUNT,
10744 cpu);
10745 if (!qdesc) {
10746 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10747 "3146 Failed allocate "
10748 "receive HRQ\n");
10749 goto out_error;
10750 }
10751 qdesc->hdwq = idx;
10752 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
10753
10754 /* Only needed for header of RQ pair */
10755 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
10756 GFP_KERNEL,
10757 cpu_to_node(cpu));
10758 if (qdesc->rqbp == NULL) {
10759 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10760 "6131 Failed allocate "
10761 "Header RQBP\n");
10762 goto out_error;
10763 }
10764
10765 /* Put list in known state in case driver load fails. */
10766 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
10767
10768 /* Create NVMET Receive Queue for data */
10769 qdesc = lpfc_sli4_queue_alloc(phba,
10770 LPFC_DEFAULT_PAGE_SIZE,
10771 phba->sli4_hba.rq_esize,
10772 LPFC_NVMET_RQE_DEF_COUNT,
10773 cpu);
10774 if (!qdesc) {
10775 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10776 "3156 Failed allocate "
10777 "receive DRQ\n");
10778 goto out_error;
10779 }
10780 qdesc->hdwq = idx;
10781 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
10782 }
10783 }
10784
10785 /* Clear NVME stats */
10786 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10787 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10788 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
10789 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
10790 }
10791 }
10792
10793 /* Clear SCSI stats */
10794 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
10795 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10796 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
10797 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
10798 }
10799 }
10800
10801 return 0;
10802
10803 out_error:
10804 lpfc_sli4_queue_destroy(phba);
10805 return -ENOMEM;
10806 }
10807
10808 static inline void
10809 __lpfc_sli4_release_queue(struct lpfc_queue **qp)
10810 {
10811 if (*qp != NULL) {
10812 lpfc_sli4_queue_free(*qp);
10813 *qp = NULL;
10814 }
10815 }
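/*
 * The double-pointer idiom above frees the queue and NULLs the caller's
 * reference in one step, so a repeated release is a harmless no-op.
 * Minimal usage sketch (illustrative only):
 *
 *	__lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
 *	__lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);	// now a no-op
 */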
10816
10817 static inline void
10818 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
10819 {
10820 int idx;
10821
10822 if (*qs == NULL)
10823 return;
10824
10825 for (idx = 0; idx < max; idx++)
10826 __lpfc_sli4_release_queue(&(*qs)[idx]);
10827
10828 kfree(*qs);
10829 *qs = NULL;
10830 }
10831
10832 static inline void
10833 lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
10834 {
10835 struct lpfc_sli4_hdw_queue *hdwq;
10836 struct lpfc_queue *eq;
10837 uint32_t idx;
10838
10839 hdwq = phba->sli4_hba.hdwq;
10840
10841 /* Loop thru all Hardware Queues */
10842 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10843 /* Free the CQ/WQ corresponding to the Hardware Queue */
10844 lpfc_sli4_queue_free(hdwq[idx].io_cq);
10845 lpfc_sli4_queue_free(hdwq[idx].io_wq);
10846 hdwq[idx].hba_eq = NULL;
10847 hdwq[idx].io_cq = NULL;
10848 hdwq[idx].io_wq = NULL;
10849 if (phba->cfg_xpsgl && !phba->nvmet_support)
10850 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
10851 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
10852 }
10853 /* Loop thru all IRQ vectors */
10854 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
10855 /* Free the EQ corresponding to the IRQ vector */
10856 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
10857 lpfc_sli4_queue_free(eq);
10858 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
10859 }
10860 }
10861
10862 /**
10863 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
10864 * @phba: pointer to lpfc hba data structure.
10865 *
10866 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
10867 * operation.
10873 **/
10874 void
10875 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
10876 {
10877 /*
10878 * Set FREE_INIT before beginning to free the queues.
10879 * Wait until all queue users acknowledge the release
10880 * by clearing FREE_WAIT.
10881 */
10882 spin_lock_irq(&phba->hbalock);
10883 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
10884 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
10885 spin_unlock_irq(&phba->hbalock);
10886 msleep(20);
10887 spin_lock_irq(&phba->hbalock);
10888 }
10889 spin_unlock_irq(&phba->hbalock);
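/*
 * Sketch of the assumed consumer side of this handshake: a queue user
 * sets LPFC_QUEUE_FREE_WAIT under hbalock before touching a queue and
 * clears it when done, e.g.:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	if (phba->sli.sli_flag & LPFC_QUEUE_FREE_INIT) {
 *		spin_unlock_irq(&phba->hbalock);
 *		return;		// teardown in progress
 *	}
 *	phba->sli.sli_flag |= LPFC_QUEUE_FREE_WAIT;
 *	spin_unlock_irq(&phba->hbalock);
 *	... use the queue ...
 *	spin_lock_irq(&phba->hbalock);
 *	phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_WAIT;
 *	spin_unlock_irq(&phba->hbalock);
 */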
10890
10891 lpfc_sli4_cleanup_poll_list(phba);
10892
10893 /* Release HBA eqs */
10894 if (phba->sli4_hba.hdwq)
10895 lpfc_sli4_release_hdwq(phba);
10896
10897 if (phba->nvmet_support) {
10898 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
10899 phba->cfg_nvmet_mrq);
10900
10901 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
10902 phba->cfg_nvmet_mrq);
10903 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
10904 phba->cfg_nvmet_mrq);
10905 }
10906
10907 /* Release mailbox command work queue */
10908 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
10909
10910 /* Release ELS work queue */
10911 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
10912
10913 /* Release NVME LS work queue */
10914 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
10915
10916 /* Release unsolicited receive queue */
10917 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
10918 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
10919
10920 /* Release ELS complete queue */
10921 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
10922
10923 /* Release NVME LS complete queue */
10924 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
10925
10926 /* Release mailbox command complete queue */
10927 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
10928
10929 /* Everything on this list has been freed */
10930 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10931
10932 /* Done with freeing the queues */
10933 spin_lock_irq(&phba->hbalock);
10934 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
10935 spin_unlock_irq(&phba->hbalock);
10936 }
10937
10938 int
10939 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
10940 {
10941 struct lpfc_rqb *rqbp;
10942 struct lpfc_dmabuf *h_buf;
10943 struct rqb_dmabuf *rqb_buffer;
10944
10945 rqbp = rq->rqbp;
10946 while (!list_empty(&rqbp->rqb_buffer_list)) {
10947 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
10948 struct lpfc_dmabuf, list);
10949
10950 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
10951 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
10952 rqbp->buffer_count--;
10953 }
10954 return 1;
10955 }
10956
10957 static int
10958 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
10959 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
10960 int qidx, uint32_t qtype)
10961 {
10962 struct lpfc_sli_ring *pring;
10963 int rc;
10964
10965 if (!eq || !cq || !wq) {
10966 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10967 "6085 Fast-path %s (%d) not allocated\n",
10968 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
10969 return -ENOMEM;
10970 }
10971
10972 /* create the Cq first */
10973 rc = lpfc_cq_create(phba, cq, eq,
10974 (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
10975 if (rc) {
10976 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10977 "6086 Failed setup of CQ (%d), rc = 0x%x\n",
10978 qidx, (uint32_t)rc);
10979 return rc;
10980 }
10981
10982 if (qtype != LPFC_MBOX) {
10983 /* Setup cq_map for fast lookup */
10984 if (cq_map)
10985 *cq_map = cq->queue_id;
10986
10987 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10988 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
10989 qidx, cq->queue_id, qidx, eq->queue_id);
10990
10991 /* create the wq */
10992 rc = lpfc_wq_create(phba, wq, cq, qtype);
10993 if (rc) {
10994 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10995 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
10996 qidx, (uint32_t)rc);
10997 /* no need to tear down cq - caller will do so */
10998 return rc;
10999 }
11000
11001 /* Bind this CQ/WQ to the NVME ring */
11002 pring = wq->pring;
11003 pring->sli.sli4.wqp = (void *)wq;
11004 cq->pring = pring;
11005
11006 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11007 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
11008 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
11009 } else {
11010 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
11011 if (rc) {
11012 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11013 "0539 Failed setup of slow-path MQ: "
11014 "rc = 0x%x\n", rc);
11015 /* no need to tear down cq - caller will do so */
11016 return rc;
11017 }
11018
11019 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11020 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
11021 phba->sli4_hba.mbx_wq->queue_id,
11022 phba->sli4_hba.mbx_cq->queue_id);
11023 }
11024
11025 return 0;
11026 }
11027
11028 /**
11029 * lpfc_setup_cq_lookup - Setup the CQ lookup table
11030 * @phba: pointer to lpfc hba data structure.
11031 *
11032 * This routine will populate the cq_lookup table by all
11033 * available CQ queue_id's.
11034 **/
11035 static void
11036 lpfc_setup_cq_lookup(struct lpfc_hba *phba)
11037 {
11038 struct lpfc_queue *eq, *childq;
11039 int qidx;
11040
11041 memset(phba->sli4_hba.cq_lookup, 0,
11042 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
11043 /* Loop thru all IRQ vectors */
11044 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11045 /* Get the EQ corresponding to the IRQ vector */
11046 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11047 if (!eq)
11048 continue;
11049 /* Loop through all CQs associated with that EQ */
11050 list_for_each_entry(childq, &eq->child_list, list) {
11051 if (childq->queue_id > phba->sli4_hba.cq_max)
11052 continue;
11053 if (childq->subtype == LPFC_IO)
11054 phba->sli4_hba.cq_lookup[childq->queue_id] =
11055 childq;
11056 }
11057 }
11058 }
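/*
 * With the table populated, the fast-path interrupt handler can resolve
 * a CQ id taken from an EQE in O(1), e.g. (illustrative):
 *
 *	if (cqid <= phba->sli4_hba.cq_max)
 *		cq = phba->sli4_hba.cq_lookup[cqid];
 *
 * instead of walking each EQ's child_list on every interrupt.
 */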
11059
11060 /**
11061 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
11062 * @phba: pointer to lpfc hba data structure.
11063 *
11064 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
11065 * operation.
11066 *
11067 * Return codes
11068 * 0 - successful
11069 * -ENOMEM - No available memory
11070 * -EIO - The mailbox failed to complete successfully.
11071 **/
11072 int
11073 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
11074 {
11075 uint32_t shdr_status, shdr_add_status;
11076 union lpfc_sli4_cfg_shdr *shdr;
11077 struct lpfc_vector_map_info *cpup;
11078 struct lpfc_sli4_hdw_queue *qp;
11079 LPFC_MBOXQ_t *mboxq;
11080 int qidx, cpu;
11081 uint32_t length, usdelay;
11082 int rc = -ENOMEM;
11083
11084 /* Check for dual-ULP support */
11085 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11086 if (!mboxq) {
11087 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11088 "3249 Unable to allocate memory for "
11089 "QUERY_FW_CFG mailbox command\n");
11090 return -ENOMEM;
11091 }
11092 length = (sizeof(struct lpfc_mbx_query_fw_config) -
11093 sizeof(struct lpfc_sli4_cfg_mhdr));
11094 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11095 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
11096 length, LPFC_SLI4_MBX_EMBED);
11097
11098 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11099
11100 shdr = (union lpfc_sli4_cfg_shdr *)
11101 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
11102 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11103 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11104 if (shdr_status || shdr_add_status || rc) {
11105 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11106 "3250 QUERY_FW_CFG mailbox failed with status "
11107 "x%x add_status x%x, mbx status x%x\n",
11108 shdr_status, shdr_add_status, rc);
11109 mempool_free(mboxq, phba->mbox_mem_pool);
11110 rc = -ENXIO;
11111 goto out_error;
11112 }
11113
11114 phba->sli4_hba.fw_func_mode =
11115 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
11116 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
11117 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
11118 phba->sli4_hba.physical_port =
11119 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
11120 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11121 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
11122 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
11123 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
11124
11125 mempool_free(mboxq, phba->mbox_mem_pool);
11126
11127 /*
11128 * Set up HBA Event Queues (EQs)
11129 */
11130 qp = phba->sli4_hba.hdwq;
11131
11132 /* Set up HBA event queue */
11133 if (!qp) {
11134 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11135 "3147 Fast-path EQs not allocated\n");
11136 rc = -ENOMEM;
11137 goto out_error;
11138 }
11139
11140 /* Loop thru all IRQ vectors */
11141 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11142 /* Create HBA Event Queues (EQs) in order */
11143 for_each_present_cpu(cpu) {
11144 cpup = &phba->sli4_hba.cpu_map[cpu];
11145
11146 /* Look for the CPU that's using that vector with
11147 * LPFC_CPU_FIRST_IRQ set.
11148 */
11149 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11150 continue;
11151 if (qidx != cpup->eq)
11152 continue;
11153
11154 /* Create an EQ for that vector */
11155 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
11156 phba->cfg_fcp_imax);
11157 if (rc) {
11158 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11159 "0523 Failed setup of fast-path"
11160 " EQ (%d), rc = 0x%x\n",
11161 cpup->eq, (uint32_t)rc);
11162 goto out_destroy;
11163 }
11164
11165 /* Save the EQ for that vector in the hba_eq_hdl */
11166 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
11167 qp[cpup->hdwq].hba_eq;
11168
11169 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11170 "2584 HBA EQ setup: queue[%d]-id=%d\n",
11171 cpup->eq,
11172 qp[cpup->hdwq].hba_eq->queue_id);
11173 }
11174 }
11175
11176 /* Loop thru all Hardware Queues */
11177 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11178 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
11179 cpup = &phba->sli4_hba.cpu_map[cpu];
11180
11181 /* Create the CQ/WQ corresponding to the Hardware Queue */
11182 rc = lpfc_create_wq_cq(phba,
11183 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
11184 qp[qidx].io_cq,
11185 qp[qidx].io_wq,
11186 &phba->sli4_hba.hdwq[qidx].io_cq_map,
11187 qidx,
11188 LPFC_IO);
11189 if (rc) {
11190 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11191 "0535 Failed to setup fastpath "
11192 "IO WQ/CQ (%d), rc = 0x%x\n",
11193 qidx, (uint32_t)rc);
11194 goto out_destroy;
11195 }
11196 }
11197
11198 /*
11199 * Set up Slow Path Complete Queues (CQs)
11200 */
11201
11202 /* Set up slow-path MBOX CQ/MQ */
11203
11204 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
11205 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11206 "0528 %s not allocated\n",
11207 phba->sli4_hba.mbx_cq ?
11208 "Mailbox WQ" : "Mailbox CQ");
11209 rc = -ENOMEM;
11210 goto out_destroy;
11211 }
11212
11213 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11214 phba->sli4_hba.mbx_cq,
11215 phba->sli4_hba.mbx_wq,
11216 NULL, 0, LPFC_MBOX);
11217 if (rc) {
11218 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11219 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
11220 (uint32_t)rc);
11221 goto out_destroy;
11222 }
11223 if (phba->nvmet_support) {
11224 if (!phba->sli4_hba.nvmet_cqset) {
11225 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11226 "3165 Fast-path NVME CQ Set "
11227 "array not allocated\n");
11228 rc = -ENOMEM;
11229 goto out_destroy;
11230 }
11231 if (phba->cfg_nvmet_mrq > 1) {
11232 rc = lpfc_cq_create_set(phba,
11233 phba->sli4_hba.nvmet_cqset,
11234 qp,
11235 LPFC_WCQ, LPFC_NVMET);
11236 if (rc) {
11237 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11238 "3164 Failed setup of NVME CQ "
11239 "Set, rc = 0x%x\n",
11240 (uint32_t)rc);
11241 goto out_destroy;
11242 }
11243 } else {
11244 /* Set up NVMET Receive Complete Queue */
11245 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
11246 qp[0].hba_eq,
11247 LPFC_WCQ, LPFC_NVMET);
11248 if (rc) {
11249 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11250 "6089 Failed setup NVMET CQ: "
11251 "rc = 0x%x\n", (uint32_t)rc);
11252 goto out_destroy;
11253 }
11254 phba->sli4_hba.nvmet_cqset[0]->chann = 0;
11255
11256 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11257 "6090 NVMET CQ setup: cq-id=%d, "
11258 "parent eq-id=%d\n",
11259 phba->sli4_hba.nvmet_cqset[0]->queue_id,
11260 qp[0].hba_eq->queue_id);
11261 }
11262 }
11263
11264 /* Set up slow-path ELS WQ/CQ */
11265 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
11266 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11267 "0530 ELS %s not allocated\n",
11268 phba->sli4_hba.els_cq ? "WQ" : "CQ");
11269 rc = -ENOMEM;
11270 goto out_destroy;
11271 }
11272 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11273 phba->sli4_hba.els_cq,
11274 phba->sli4_hba.els_wq,
11275 NULL, 0, LPFC_ELS);
11276 if (rc) {
11277 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11278 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
11279 (uint32_t)rc);
11280 goto out_destroy;
11281 }
11282 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11283 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
11284 phba->sli4_hba.els_wq->queue_id,
11285 phba->sli4_hba.els_cq->queue_id);
11286
11287 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11288 /* Set up NVME LS Complete Queue */
11289 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
11290 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11291 "6091 LS %s not allocated\n",
11292 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
11293 rc = -ENOMEM;
11294 goto out_destroy;
11295 }
11296 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11297 phba->sli4_hba.nvmels_cq,
11298 phba->sli4_hba.nvmels_wq,
11299 NULL, 0, LPFC_NVME_LS);
11300 if (rc) {
11301 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11302 "0526 Failed setup of NVVME LS WQ/CQ: "
11303 "rc = 0x%x\n", (uint32_t)rc);
11304 goto out_destroy;
11305 }
11306
11307 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11308 "6096 ELS WQ setup: wq-id=%d, "
11309 "parent cq-id=%d\n",
11310 phba->sli4_hba.nvmels_wq->queue_id,
11311 phba->sli4_hba.nvmels_cq->queue_id);
11312 }
11313
11314 /*
11315 * Create NVMET Receive Queue (RQ)
11316 */
11317 if (phba->nvmet_support) {
11318 if ((!phba->sli4_hba.nvmet_cqset) ||
11319 (!phba->sli4_hba.nvmet_mrq_hdr) ||
11320 (!phba->sli4_hba.nvmet_mrq_data)) {
11321 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11322 "6130 MRQ CQ Queues not "
11323 "allocated\n");
11324 rc = -ENOMEM;
11325 goto out_destroy;
11326 }
11327 if (phba->cfg_nvmet_mrq > 1) {
11328 rc = lpfc_mrq_create(phba,
11329 phba->sli4_hba.nvmet_mrq_hdr,
11330 phba->sli4_hba.nvmet_mrq_data,
11331 phba->sli4_hba.nvmet_cqset,
11332 LPFC_NVMET);
11333 if (rc) {
11334 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11335 "6098 Failed setup of NVMET "
11336 "MRQ: rc = 0x%x\n",
11337 (uint32_t)rc);
11338 goto out_destroy;
11339 }
11340
11341 } else {
11342 rc = lpfc_rq_create(phba,
11343 phba->sli4_hba.nvmet_mrq_hdr[0],
11344 phba->sli4_hba.nvmet_mrq_data[0],
11345 phba->sli4_hba.nvmet_cqset[0],
11346 LPFC_NVMET);
11347 if (rc) {
11348 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11349 "6057 Failed setup of NVMET "
11350 "Receive Queue: rc = 0x%x\n",
11351 (uint32_t)rc);
11352 goto out_destroy;
11353 }
11354
11355 lpfc_printf_log(
11356 phba, KERN_INFO, LOG_INIT,
11357 "6099 NVMET RQ setup: hdr-rq-id=%d, "
11358 "dat-rq-id=%d parent cq-id=%d\n",
11359 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
11360 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
11361 phba->sli4_hba.nvmet_cqset[0]->queue_id);
11362
11363 }
11364 }
11365
11366 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
11367 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11368 "0540 Receive Queue not allocated\n");
11369 rc = -ENOMEM;
11370 goto out_destroy;
11371 }
11372
11373 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
11374 phba->sli4_hba.els_cq, LPFC_USOL);
11375 if (rc) {
11376 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11377 "0541 Failed setup of Receive Queue: "
11378 "rc = 0x%x\n", (uint32_t)rc);
11379 goto out_destroy;
11380 }
11381
11382 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11383 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
11384 "parent cq-id=%d\n",
11385 phba->sli4_hba.hdr_rq->queue_id,
11386 phba->sli4_hba.dat_rq->queue_id,
11387 phba->sli4_hba.els_cq->queue_id);
11388
11389 if (phba->cfg_fcp_imax)
11390 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
11391 else
11392 usdelay = 0;
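/*
 * Example (illustrative value): cfg_fcp_imax = 50000 interrupts/sec
 * gives usdelay = 1000000 / 50000 = 20 us between EQ interrupts,
 * assuming LPFC_SEC_TO_USEC is one second expressed in microseconds.
 */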
11393
11394 for (qidx = 0; qidx < phba->cfg_irq_chann;
11395 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
11396 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
11397 usdelay);
11398
11399 if (phba->sli4_hba.cq_max) {
11400 kfree(phba->sli4_hba.cq_lookup);
11401 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
11402 sizeof(struct lpfc_queue *), GFP_KERNEL);
11403 if (!phba->sli4_hba.cq_lookup) {
11404 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11405 "0549 Failed setup of CQ Lookup table: "
11406 "size 0x%x\n", phba->sli4_hba.cq_max);
11407 rc = -ENOMEM;
11408 goto out_destroy;
11409 }
11410 lpfc_setup_cq_lookup(phba);
11411 }
11412 return 0;
11413
11414 out_destroy:
11415 lpfc_sli4_queue_unset(phba);
11416 out_error:
11417 return rc;
11418 }
11419
11420 /**
11421 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
11422 * @phba: pointer to lpfc hba data structure.
11423 *
11424 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
11425 * operation.
11431 **/
11432 void
11433 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
11434 {
11435 struct lpfc_sli4_hdw_queue *qp;
11436 struct lpfc_queue *eq;
11437 int qidx;
11438
11439 /* Unset mailbox command work queue */
11440 if (phba->sli4_hba.mbx_wq)
11441 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
11442
11443 /* Unset NVME LS work queue */
11444 if (phba->sli4_hba.nvmels_wq)
11445 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
11446
11447 /* Unset ELS work queue */
11448 if (phba->sli4_hba.els_wq)
11449 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
11450
11451 /* Unset unsolicited receive queue */
11452 if (phba->sli4_hba.hdr_rq)
11453 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
11454 phba->sli4_hba.dat_rq);
11455
11456 /* Unset mailbox command complete queue */
11457 if (phba->sli4_hba.mbx_cq)
11458 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
11459
11460 /* Unset ELS complete queue */
11461 if (phba->sli4_hba.els_cq)
11462 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
11463
11464 /* Unset NVME LS complete queue */
11465 if (phba->sli4_hba.nvmels_cq)
11466 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
11467
11468 if (phba->nvmet_support) {
11469 /* Unset NVMET MRQ queue */
11470 if (phba->sli4_hba.nvmet_mrq_hdr) {
11471 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11472 lpfc_rq_destroy(
11473 phba,
11474 phba->sli4_hba.nvmet_mrq_hdr[qidx],
11475 phba->sli4_hba.nvmet_mrq_data[qidx]);
11476 }
11477
11478 /* Unset NVMET CQ Set complete queue */
11479 if (phba->sli4_hba.nvmet_cqset) {
11480 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11481 lpfc_cq_destroy(
11482 phba, phba->sli4_hba.nvmet_cqset[qidx]);
11483 }
11484 }
11485
11486 /* Unset fast-path SLI4 queues */
11487 if (phba->sli4_hba.hdwq) {
11488 /* Loop thru all Hardware Queues */
11489 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11490 /* Destroy the CQ/WQ corresponding to Hardware Queue */
11491 qp = &phba->sli4_hba.hdwq[qidx];
11492 lpfc_wq_destroy(phba, qp->io_wq);
11493 lpfc_cq_destroy(phba, qp->io_cq);
11494 }
11495 /* Loop thru all IRQ vectors */
11496 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11497 /* Destroy the EQ corresponding to the IRQ vector */
11498 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11499 lpfc_eq_destroy(phba, eq);
11500 }
11501 }
11502
11503 kfree(phba->sli4_hba.cq_lookup);
11504 phba->sli4_hba.cq_lookup = NULL;
11505 phba->sli4_hba.cq_max = 0;
11506 }
11507
11508 /**
11509 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
11510 * @phba: pointer to lpfc hba data structure.
11511 *
11512 * This routine is invoked to allocate and set up a pool of completion queue
11513 * events. The body of the completion queue event is a completion queue entry
11514 * (CQE). For now, this pool is used for the interrupt service routine to queue
11515 * the following HBA completion queue events for the worker thread to process:
11516 * - Mailbox asynchronous events
11517 * - Receive queue completion unsolicited events
11518 * Later, this can be used for all the slow-path events.
11519 *
11520 * Return codes
11521 * 0 - successful
11522 * -ENOMEM - No available memory
11523 **/
11524 static int
11525 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
11526 {
11527 struct lpfc_cq_event *cq_event;
11528 int i;
11529
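/*
 * The pool is sized at four times the CQ entry count; for example
 * (illustrative value), a cq_ecount of 1024 pre-allocates 4096 events.
 */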
11530 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
11531 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
11532 if (!cq_event)
11533 goto out_pool_create_fail;
11534 list_add_tail(&cq_event->list,
11535 &phba->sli4_hba.sp_cqe_event_pool);
11536 }
11537 return 0;
11538
11539 out_pool_create_fail:
11540 lpfc_sli4_cq_event_pool_destroy(phba);
11541 return -ENOMEM;
11542 }
11543
11544 /**
11545 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
11546 * @phba: pointer to lpfc hba data structure.
11547 *
11548 * This routine is invoked to free the pool of completion queue events at
11549 * driver unload time. Note that, it is the responsibility of the driver
11550 * cleanup routine to free all the outstanding completion-queue events
11551 * allocated from this pool back into the pool before invoking this routine
11552 * to destroy the pool.
11553 **/
11554 static void
11555 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
11556 {
11557 struct lpfc_cq_event *cq_event, *next_cq_event;
11558
11559 list_for_each_entry_safe(cq_event, next_cq_event,
11560 &phba->sli4_hba.sp_cqe_event_pool, list) {
11561 list_del(&cq_event->list);
11562 kfree(cq_event);
11563 }
11564 }
11565
11566 /**
11567 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11568 * @phba: pointer to lpfc hba data structure.
11569 *
11570 * This routine is the lock free version of the API invoked to allocate a
11571 * completion-queue event from the free pool.
11572 *
11573 * Return: Pointer to the newly allocated completion-queue event if successful
11574 * NULL otherwise.
11575 **/
11576 struct lpfc_cq_event *
11577 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11578 {
11579 struct lpfc_cq_event *cq_event = NULL;
11580
11581 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
11582 struct lpfc_cq_event, list);
11583 return cq_event;
11584 }
11585
11586 /**
11587 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11588 * @phba: pointer to lpfc hba data structure.
11589 *
11590 * This routine is the lock version of the API invoked to allocate a
11591 * completion-queue event from the free pool.
11592 *
11593 * Return: Pointer to the newly allocated completion-queue event if successful
11594 * NULL otherwise.
11595 **/
11596 struct lpfc_cq_event *
11597 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11598 {
11599 struct lpfc_cq_event *cq_event;
11600 unsigned long iflags;
11601
11602 spin_lock_irqsave(&phba->hbalock, iflags);
11603 cq_event = __lpfc_sli4_cq_event_alloc(phba);
11604 spin_unlock_irqrestore(&phba->hbalock, iflags);
11605 return cq_event;
11606 }
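/*
 * Typical caller pattern from interrupt context (illustrative sketch):
 * take an event from the pool, copy in the CQE, and hand it off to the
 * worker thread:
 *
 *	cq_event = lpfc_sli4_cq_event_alloc(phba);
 *	if (!cq_event)
 *		return false;	// pool exhausted, drop the event
 *	memcpy(&cq_event->cqe, entry, size);
 *	... queue cq_event and wake the worker ...
 */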
11607
11608 /**
11609 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11610 * @phba: pointer to lpfc hba data structure.
11611 * @cq_event: pointer to the completion queue event to be freed.
11612 *
11613 * This routine is the lock free version of the API invoked to release a
11614 * completion-queue event back into the free pool.
11615 **/
11616 void
11617 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11618 struct lpfc_cq_event *cq_event)
11619 {
11620 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
11621 }
11622
11623 /**
11624 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11625 * @phba: pointer to lpfc hba data structure.
11626 * @cq_event: pointer to the completion queue event to be freed.
11627 *
11628 * This routine is the lock version of the API invoked to release a
11629 * completion-queue event back into the free pool.
11630 **/
11631 void
11632 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11633 struct lpfc_cq_event *cq_event)
11634 {
11635 unsigned long iflags;
11636 spin_lock_irqsave(&phba->hbalock, iflags);
11637 __lpfc_sli4_cq_event_release(phba, cq_event);
11638 spin_unlock_irqrestore(&phba->hbalock, iflags);
11639 }
11640
11641 /**
11642 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
11643 * @phba: pointer to lpfc hba data structure.
11644 *
11645 * This routine frees all pending completion-queue events back into the
11646 * free pool for device reset.
11647 **/
11648 static void
11649 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
11650 {
11651 LIST_HEAD(cq_event_list);
11652 struct lpfc_cq_event *cq_event;
11653 unsigned long iflags;
11654
11655 /* Retrieve all the pending WCQEs from pending WCQE lists */
11656
11657 /* Pending ELS XRI abort events */
11658 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11659 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
11660 &cq_event_list);
11661 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11662
11663 /* Pending async events */
11664 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
11665 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
11666 &cq_event_list);
11667 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
11668
11669 while (!list_empty(&cq_event_list)) {
11670 list_remove_head(&cq_event_list, cq_event,
11671 struct lpfc_cq_event, list);
11672 lpfc_sli4_cq_event_release(phba, cq_event);
11673 }
11674 }
11675
11676 /**
11677 * lpfc_pci_function_reset - Reset pci function.
11678 * @phba: pointer to lpfc hba data structure.
11679 *
11680 * This routine is invoked to request a PCI function reset. It destroys
11681 * all resources assigned to the PCI function that originates this request.
11682 *
11683 * Return codes
11684 * 0 - successful
11685 * -ENOMEM - No available memory
11686 * -EIO - The mailbox failed to complete successfully.
11687 **/
11688 int
11689 lpfc_pci_function_reset(struct lpfc_hba *phba)
11690 {
11691 LPFC_MBOXQ_t *mboxq;
11692 uint32_t rc = 0, if_type;
11693 uint32_t shdr_status, shdr_add_status;
11694 uint32_t rdy_chk;
11695 uint32_t port_reset = 0;
11696 union lpfc_sli4_cfg_shdr *shdr;
11697 struct lpfc_register reg_data;
11698 uint16_t devid;
11699
11700 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11701 switch (if_type) {
11702 case LPFC_SLI_INTF_IF_TYPE_0:
11703 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
11704 GFP_KERNEL);
11705 if (!mboxq) {
11706 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11707 "0494 Unable to allocate memory for "
11708 "issuing SLI_FUNCTION_RESET mailbox "
11709 "command\n");
11710 return -ENOMEM;
11711 }
11712
11713 /* Setup PCI function reset mailbox-ioctl command */
11714 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11715 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
11716 LPFC_SLI4_MBX_EMBED);
11717 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11718 shdr = (union lpfc_sli4_cfg_shdr *)
11719 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
11720 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11721 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
11722 &shdr->response);
11723 mempool_free(mboxq, phba->mbox_mem_pool);
11724 if (shdr_status || shdr_add_status || rc) {
11725 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11726 "0495 SLI_FUNCTION_RESET mailbox "
11727 "failed with status x%x add_status x%x,"
11728 " mbx status x%x\n",
11729 shdr_status, shdr_add_status, rc);
11730 rc = -ENXIO;
11731 }
11732 break;
11733 case LPFC_SLI_INTF_IF_TYPE_2:
11734 case LPFC_SLI_INTF_IF_TYPE_6:
11735 wait:
11736 /*
11737 * Poll the Port Status Register and wait for RDY for
11738 * up to 30 seconds. If the port doesn't respond, treat
11739 * it as an error.
11740 */
11741 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
11742 if (lpfc_readl(phba->sli4_hba.u.if_type2.
11743 STATUSregaddr, &reg_data.word0)) {
11744 rc = -ENODEV;
11745 goto out;
11746 }
11747 if (bf_get(lpfc_sliport_status_rdy, &reg_data))
11748 break;
11749 msleep(20);
11750 }
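/* 1500 polls x 20 ms sleep = the 30 second budget noted above. */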
11751
11752 if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
11753 phba->work_status[0] = readl(
11754 phba->sli4_hba.u.if_type2.ERR1regaddr);
11755 phba->work_status[1] = readl(
11756 phba->sli4_hba.u.if_type2.ERR2regaddr);
11757 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11758 "2890 Port not ready, port status reg "
11759 "0x%x error 1=0x%x, error 2=0x%x\n",
11760 reg_data.word0,
11761 phba->work_status[0],
11762 phba->work_status[1]);
11763 rc = -ENODEV;
11764 goto out;
11765 }
11766
11767 if (bf_get(lpfc_sliport_status_pldv, &reg_data))
11768 lpfc_pldv_detect = true;
11769
11770 if (!port_reset) {
11771 /*
11772 * Reset the port now
11773 */
11774 reg_data.word0 = 0;
11775 bf_set(lpfc_sliport_ctrl_end, &reg_data,
11776 LPFC_SLIPORT_LITTLE_ENDIAN);
11777 bf_set(lpfc_sliport_ctrl_ip, &reg_data,
11778 LPFC_SLIPORT_INIT_PORT);
11779 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
11780 CTRLregaddr);
11781 /* flush */
11782 pci_read_config_word(phba->pcidev,
11783 PCI_DEVICE_ID, &devid);
11784
11785 port_reset = 1;
11786 msleep(20);
11787 goto wait;
11788 } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
11789 rc = -ENODEV;
11790 goto out;
11791 }
11792 break;
11793
11794 case LPFC_SLI_INTF_IF_TYPE_1:
11795 default:
11796 break;
11797 }
11798
11799 out:
11800 /* Catch the not-ready port failure after a port reset. */
11801 if (rc) {
11802 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11803 "3317 HBA not functional: IP Reset Failed "
11804 "try: echo fw_reset > board_mode\n");
11805 rc = -ENODEV;
11806 }
11807
11808 return rc;
11809 }
11810
11811 /**
11812 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
11813 * @phba: pointer to lpfc hba data structure.
11814 *
11815 * This routine is invoked to set up the PCI device memory space for device
11816 * with SLI-4 interface spec.
11817 *
11818 * Return codes
11819 * 0 - successful
11820 * other values - error
11821 **/
11822 static int
11823 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
11824 {
11825 struct pci_dev *pdev = phba->pcidev;
11826 unsigned long bar0map_len, bar1map_len, bar2map_len;
11827 int error;
11828 uint32_t if_type;
11829
11830 if (!pdev)
11831 return -ENODEV;
11832
11833 /* Set the device DMA mask size */
11834 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11835 if (error)
11836 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11837 if (error)
11838 return error;
11839
11840 /*
11841 * The BARs and register set definitions and offset locations are
11842 * dependent on the if_type.
11843 */
11844 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
11845 &phba->sli4_hba.sli_intf.word0)) {
11846 return -ENODEV;
11847 }
11848
11849 /* There is no SLI3 failback for SLI4 devices. */
11850 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
11851 LPFC_SLI_INTF_VALID) {
11852 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11853 "2894 SLI_INTF reg contents invalid "
11854 "sli_intf reg 0x%x\n",
11855 phba->sli4_hba.sli_intf.word0);
11856 return -ENODEV;
11857 }
11858
11859 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11860 /*
11861 * Get the bus address of the SLI4 device BAR regions and the
11862 * number of bytes required by each mapping. The mapping of the
11863 * particular PCI BAR regions is dependent on the type of
11864 * SLI4 device.
11865 */
11866 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
11867 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
11868 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
11869
11870 /*
11871 * Map SLI4 PCI Config Space Register base to a kernel virtual
11872 * addr
11873 */
11874 phba->sli4_hba.conf_regs_memmap_p =
11875 ioremap(phba->pci_bar0_map, bar0map_len);
11876 if (!phba->sli4_hba.conf_regs_memmap_p) {
11877 dev_printk(KERN_ERR, &pdev->dev,
11878 "ioremap failed for SLI4 PCI config "
11879 "registers.\n");
11880 return -ENODEV;
11881 }
11882 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
11883 /* Set up BAR0 PCI config space register memory map */
11884 lpfc_sli4_bar0_register_memmap(phba, if_type);
11885 } else {
11886 phba->pci_bar0_map = pci_resource_start(pdev, 1);
11887 bar0map_len = pci_resource_len(pdev, 1);
11888 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
11889 dev_printk(KERN_ERR, &pdev->dev,
11890 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
11891 return -ENODEV;
11892 }
11893 phba->sli4_hba.conf_regs_memmap_p =
11894 ioremap(phba->pci_bar0_map, bar0map_len);
11895 if (!phba->sli4_hba.conf_regs_memmap_p) {
11896 dev_printk(KERN_ERR, &pdev->dev,
11897 "ioremap failed for SLI4 PCI config "
11898 "registers.\n");
11899 return -ENODEV;
11900 }
11901 lpfc_sli4_bar0_register_memmap(phba, if_type);
11902 }
11903
11904 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11905 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
11906 /*
11907 * Map SLI4 if type 0 HBA Control Register base to a
11908 * kernel virtual address and setup the registers.
11909 */
11910 phba->pci_bar1_map = pci_resource_start(pdev,
11911 PCI_64BIT_BAR2);
11912 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11913 phba->sli4_hba.ctrl_regs_memmap_p =
11914 ioremap(phba->pci_bar1_map,
11915 bar1map_len);
11916 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
11917 dev_err(&pdev->dev,
11918 "ioremap failed for SLI4 HBA "
11919 "control registers.\n");
11920 error = -ENOMEM;
11921 goto out_iounmap_conf;
11922 }
11923 phba->pci_bar2_memmap_p =
11924 phba->sli4_hba.ctrl_regs_memmap_p;
11925 lpfc_sli4_bar1_register_memmap(phba, if_type);
11926 } else {
11927 error = -ENOMEM;
11928 goto out_iounmap_conf;
11929 }
11930 }
11931
11932 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
11933 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
11934 /*
11935 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
11936 * virtual address and setup the registers.
11937 */
11938 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
11939 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11940 phba->sli4_hba.drbl_regs_memmap_p =
11941 ioremap(phba->pci_bar1_map, bar1map_len);
11942 if (!phba->sli4_hba.drbl_regs_memmap_p) {
11943 dev_err(&pdev->dev,
11944 "ioremap failed for SLI4 HBA doorbell registers.\n");
11945 error = -ENOMEM;
11946 goto out_iounmap_conf;
11947 }
11948 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
11949 lpfc_sli4_bar1_register_memmap(phba, if_type);
11950 }
11951
11952 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11953 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11954 /*
11955 * Map SLI4 if type 0 HBA Doorbell Register base to
11956 * a kernel virtual address and setup the registers.
11957 */
11958 phba->pci_bar2_map = pci_resource_start(pdev,
11959 PCI_64BIT_BAR4);
11960 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11961 phba->sli4_hba.drbl_regs_memmap_p =
11962 ioremap(phba->pci_bar2_map,
11963 bar2map_len);
11964 if (!phba->sli4_hba.drbl_regs_memmap_p) {
11965 dev_err(&pdev->dev,
11966 "ioremap failed for SLI4 HBA"
11967 " doorbell registers.\n");
11968 error = -ENOMEM;
11969 goto out_iounmap_ctrl;
11970 }
11971 phba->pci_bar4_memmap_p =
11972 phba->sli4_hba.drbl_regs_memmap_p;
11973 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
11974 if (error)
11975 goto out_iounmap_all;
11976 } else {
11977 error = -ENOMEM;
11978 goto out_iounmap_ctrl;
11979 }
11980 }
11981
11982 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
11983 pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11984 /*
11985 * Map SLI4 if type 6 HBA DPP Register base to a kernel
11986 * virtual address and setup the registers.
11987 */
11988 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
11989 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11990 phba->sli4_hba.dpp_regs_memmap_p =
11991 ioremap(phba->pci_bar2_map, bar2map_len);
11992 if (!phba->sli4_hba.dpp_regs_memmap_p) {
11993 dev_err(&pdev->dev,
11994 "ioremap failed for SLI4 HBA dpp registers.\n");
11995 error = -ENOMEM;
11996 goto out_iounmap_all;
11997 }
11998 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
11999 }
12000
12001 /* Set up the EQ/CQ register handling functions now */
12002 switch (if_type) {
12003 case LPFC_SLI_INTF_IF_TYPE_0:
12004 case LPFC_SLI_INTF_IF_TYPE_2:
12005 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
12006 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
12007 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
12008 break;
12009 case LPFC_SLI_INTF_IF_TYPE_6:
12010 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
12011 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
12012 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
12013 break;
12014 default:
12015 break;
12016 }
12017
12018 return 0;
12019
12020 out_iounmap_all:
12021 if (phba->sli4_hba.drbl_regs_memmap_p)
12022 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12023 out_iounmap_ctrl:
12024 if (phba->sli4_hba.ctrl_regs_memmap_p)
12025 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
12026 out_iounmap_conf:
12027 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12028
12029 return error;
12030 }
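/*
 * Illustrative sketch (not driver code) of the layered goto-unwind idiom
 * used above: each later mapping failure jumps to a label that unmaps only
 * the resources acquired before it, so nothing is double-freed. Assuming
 * hypothetical regions a and b:
 *
 *	a = ioremap(bar_a, len_a);
 *	if (!a)
 *		return -ENOMEM;
 *	b = ioremap(bar_b, len_b);
 *	if (!b) {
 *		error = -ENOMEM;
 *		goto out_unmap_a;
 *	}
 *	return 0;
 * out_unmap_a:
 *	iounmap(a);
 *	return error;
 */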
12031
12032 /**
12033 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
12034 * @phba: pointer to lpfc hba data structure.
12035 *
12036 * This routine is invoked to unset the PCI device memory space for device
12037 * with SLI-4 interface spec.
12038 **/
12039 static void
12040 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
12041 {
12042 uint32_t if_type;
12043 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12044
12045 switch (if_type) {
12046 case LPFC_SLI_INTF_IF_TYPE_0:
12047 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12048 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
12049 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12050 break;
12051 case LPFC_SLI_INTF_IF_TYPE_2:
12052 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12053 break;
12054 case LPFC_SLI_INTF_IF_TYPE_6:
12055 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12056 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12057 if (phba->sli4_hba.dpp_regs_memmap_p)
12058 iounmap(phba->sli4_hba.dpp_regs_memmap_p);
12059 break;
12060 case LPFC_SLI_INTF_IF_TYPE_1:
12061 break;
12062 default:
12063 dev_printk(KERN_ERR, &phba->pcidev->dev,
12064 "FATAL - unsupported SLI4 interface type - %d\n",
12065 if_type);
12066 break;
12067 }
12068 }
12069
12070 /**
12071 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
12072 * @phba: pointer to lpfc hba data structure.
12073 *
12074 * This routine is invoked to enable the MSI-X interrupt vectors to device
12075 * with SLI-3 interface specs.
12076 *
12077 * Return codes
12078 * 0 - successful
12079 * other values - error
12080 **/
12081 static int
12082 lpfc_sli_enable_msix(struct lpfc_hba *phba)
12083 {
12084 int rc;
12085 LPFC_MBOXQ_t *pmb;
12086
12087 /* Set up MSI-X multi-message vectors */
12088 rc = pci_alloc_irq_vectors(phba->pcidev,
12089 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
12090 if (rc < 0) {
12091 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12092 "0420 PCI enable MSI-X failed (%d)\n", rc);
12093 goto vec_fail_out;
12094 }
12095
12096 /*
12097 * Assign MSI-X vectors to interrupt handlers
12098 */
12099
12100 /* vector-0 is associated with the slow-path handler */
12101 rc = request_irq(pci_irq_vector(phba->pcidev, 0),
12102 &lpfc_sli_sp_intr_handler, 0,
12103 LPFC_SP_DRIVER_HANDLER_NAME, phba);
12104 if (rc) {
12105 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12106 "0421 MSI-X slow-path request_irq failed "
12107 "(%d)\n", rc);
12108 goto msi_fail_out;
12109 }
12110
12111 /* vector-1 is associated with the fast-path handler */
12112 rc = request_irq(pci_irq_vector(phba->pcidev, 1),
12113 &lpfc_sli_fp_intr_handler, 0,
12114 LPFC_FP_DRIVER_HANDLER_NAME, phba);
12115
12116 if (rc) {
12117 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12118 "0429 MSI-X fast-path request_irq failed "
12119 "(%d)\n", rc);
12120 goto irq_fail_out;
12121 }
12122
12123 /*
12124 * Configure HBA MSI-X attention conditions to messages
12125 */
12126 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12127
12128 if (!pmb) {
12129 rc = -ENOMEM;
12130 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12131 "0474 Unable to allocate memory for issuing "
12132 "MBOX_CONFIG_MSI command\n");
12133 goto mem_fail_out;
12134 }
12135 rc = lpfc_config_msi(phba, pmb);
12136 if (rc)
12137 goto mbx_fail_out;
12138 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
12139 if (rc != MBX_SUCCESS) {
12140 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
12141 "0351 Config MSI mailbox command failed, "
12142 "mbxCmd x%x, mbxStatus x%x\n",
12143 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
12144 goto mbx_fail_out;
12145 }
12146
12147 /* Free memory allocated for mailbox command */
12148 mempool_free(pmb, phba->mbox_mem_pool);
12149 return rc;
12150
12151 mbx_fail_out:
12152 /* Free memory allocated for mailbox command */
12153 mempool_free(pmb, phba->mbox_mem_pool);
12154
12155 mem_fail_out:
12156 /* free the irq already requested */
12157 free_irq(pci_irq_vector(phba->pcidev, 1), phba);
12158
12159 irq_fail_out:
12160 /* free the irq already requested */
12161 free_irq(pci_irq_vector(phba->pcidev, 0), phba);
12162
12163 msi_fail_out:
12164 /* Unconfigure MSI-X capability structure */
12165 pci_free_irq_vectors(phba->pcidev);
12166
12167 vec_fail_out:
12168 return rc;
12169 }
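/*
 * Note on the allocation above: passing the same value for the min_vecs
 * and max_vecs arguments of pci_alloc_irq_vectors() makes the request
 * all-or-nothing; the SLI-3 path needs exactly LPFC_MSIX_VECTORS (one
 * slow-path and one fast-path vector). A range request would instead be:
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, want, PCI_IRQ_MSIX);
 *	if (nvec < 0)
 *		return nvec;
 * where a negative return means no MSI-X vectors were available at all,
 * and otherwise between 1 and want vectors were granted.
 */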
12170
12171 /**
12172 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
12173 * @phba: pointer to lpfc hba data structure.
12174 *
12175 * This routine is invoked to enable the MSI interrupt mode to device with
12176 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
12177 * enable the MSI vector. The device driver is responsible for calling
12178 * request_irq() to register the MSI vector with an interrupt handler,
12179 * which is done in this function.
12180 *
12181 * Return codes
12182 * 0 - successful
12183 * other values - error
12184 */
12185 static int
12186 lpfc_sli_enable_msi(struct lpfc_hba *phba)
12187 {
12188 int rc;
12189
12190 rc = pci_enable_msi(phba->pcidev);
12191 if (!rc)
12192 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12193 "0012 PCI enable MSI mode success.\n");
12194 else {
12195 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12196 "0471 PCI enable MSI mode failed (%d)\n", rc);
12197 return rc;
12198 }
12199
12200 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12201 0, LPFC_DRIVER_NAME, phba);
12202 if (rc) {
12203 pci_disable_msi(phba->pcidev);
12204 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12205 "0478 MSI request_irq failed (%d)\n", rc);
12206 }
12207 return rc;
12208 }
12209
12210 /**
12211 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
12212 * @phba: pointer to lpfc hba data structure.
12213 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
12214 *
12215 * This routine is invoked to enable the device interrupt and associate the
12216 * driver's interrupt handler(s) with the interrupt vector(s) of a device
12217 * with the SLI-3 interface spec. Depending on the interrupt mode configured
12218 * for the driver, it will fall back from the configured interrupt mode to
12219 * an interrupt mode supported by the platform, kernel, and device, in the
12220 * order:
12221 * MSI-X -> MSI -> IRQ.
12222 *
12223 * Return codes
12224 * 0 - successful
12225 * other values - error
12226 **/
12227 static uint32_t
12228 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
12229 {
12230 uint32_t intr_mode = LPFC_INTR_ERROR;
12231 int retval;
12232
12233 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
12234 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
12235 if (retval)
12236 return intr_mode;
12237 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
12238
12239 if (cfg_mode == 2) {
12240 /* Now, try to enable MSI-X interrupt mode */
12241 retval = lpfc_sli_enable_msix(phba);
12242 if (!retval) {
12243 /* Indicate initialization to MSI-X mode */
12244 phba->intr_type = MSIX;
12245 intr_mode = 2;
12246 }
12247 }
12248
12249 /* Fallback to MSI if MSI-X initialization failed */
12250 if (cfg_mode >= 1 && phba->intr_type == NONE) {
12251 retval = lpfc_sli_enable_msi(phba);
12252 if (!retval) {
12253 /* Indicate initialization to MSI mode */
12254 phba->intr_type = MSI;
12255 intr_mode = 1;
12256 }
12257 }
12258
12259 /* Fallback to INTx if both MSI-X/MSI initialization failed */
12260 if (phba->intr_type == NONE) {
12261 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12262 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
12263 if (!retval) {
12264 /* Indicate initialization to INTx mode */
12265 phba->intr_type = INTx;
12266 intr_mode = 0;
12267 }
12268 }
12269 return intr_mode;
12270 }
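/*
 * Usage sketch (assumed caller shape, not copied from this file): the
 * probe path passes the user-configured mode and checks the returned
 * value, which is 2 for MSI-X, 1 for MSI, 0 for INTx:
 *
 *	intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
 *	if (intr_mode == LPFC_INTR_ERROR)
 *		... fail the probe, no interrupt mode could be enabled ...
 */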
12271
12272 /**
12273 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
12274 * @phba: pointer to lpfc hba data structure.
12275 *
12276 * This routine is invoked to disable the device interrupt and disassociate
12277 * the driver's interrupt handler(s) from the interrupt vector(s) of a device
12278 * with the SLI-3 interface spec. Depending on the interrupt mode, the driver
12279 * will release the interrupt vector(s) for the message signaled interrupt.
12280 **/
12281 static void
12282 lpfc_sli_disable_intr(struct lpfc_hba *phba)
12283 {
12284 int nr_irqs, i;
12285
12286 if (phba->intr_type == MSIX)
12287 nr_irqs = LPFC_MSIX_VECTORS;
12288 else
12289 nr_irqs = 1;
12290
12291 for (i = 0; i < nr_irqs; i++)
12292 free_irq(pci_irq_vector(phba->pcidev, i), phba);
12293 pci_free_irq_vectors(phba->pcidev);
12294
12295 /* Reset interrupt management states */
12296 phba->intr_type = NONE;
12297 phba->sli.slistat.sli_intr = 0;
12298 }
12299
12300 /**
12301 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
12302 * @phba: pointer to lpfc hba data structure.
12303 * @id: EQ vector index or Hardware Queue index
12304 * @match: LPFC_FIND_BY_EQ = match by EQ
12305 * LPFC_FIND_BY_HDWQ = match by Hardware Queue
12306 * Return the CPU that matches the selection criteria
12307 */
12308 static uint16_t
12309 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
12310 {
12311 struct lpfc_vector_map_info *cpup;
12312 int cpu;
12313
12314 /* Loop through all CPUs */
12315 for_each_present_cpu(cpu) {
12316 cpup = &phba->sli4_hba.cpu_map[cpu];
12317
12318 /* If we are matching by EQ, there may be multiple CPUs
12319 * sharing the same vector, so select the one with
12320 * LPFC_CPU_FIRST_IRQ set.
12321 */
12322 if ((match == LPFC_FIND_BY_EQ) &&
12323 (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
12324 (cpup->eq == id))
12325 return cpu;
12326
12327 /* If matching by HDWQ, select the first CPU that matches */
12328 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
12329 return cpu;
12330 }
12331 return 0;
12332 }
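/*
 * Usage example from the hotplug path later in this file: lpfc_cpu_online()
 * looks up the CPU that owns an EQ's hardware queue to decide whether
 * polling can stop:
 *
 *	n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
 *	if (n == cpu)
 *		lpfc_sli4_stop_polling(eq);
 */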
12333
12334 #ifdef CONFIG_X86
12335 /**
12336 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
12337 * @phba: pointer to lpfc hba data structure.
12338 * @cpu: CPU map index
12339 * @phys_id: CPU package physical id
12340 * @core_id: CPU core id
12341 */
12342 static int
12343 lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
12344 uint16_t phys_id, uint16_t core_id)
12345 {
12346 struct lpfc_vector_map_info *cpup;
12347 int idx;
12348
12349 for_each_present_cpu(idx) {
12350 cpup = &phba->sli4_hba.cpu_map[idx];
12351 /* Does the cpup match the one we are looking for */
12352 if ((cpup->phys_id == phys_id) &&
12353 (cpup->core_id == core_id) &&
12354 (cpu != idx))
12355 return 1;
12356 }
12357 return 0;
12358 }
12359 #endif
12360
12361 /*
12362 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
12363 * @phba: pointer to lpfc hba data structure.
12364 * @eqidx: index for eq and irq vector
12365 * @flag: flags to set for vector_map structure
12366 * @cpu: cpu used to index vector_map structure
12367 *
12368 * The routine assigns eq info into vector_map structure
12369 */
12370 static inline void
12371 lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
12372 unsigned int cpu)
12373 {
12374 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
12375 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
12376
12377 cpup->eq = eqidx;
12378 cpup->flag |= flag;
12379
12380 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12381 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
12382 cpu, eqhdl->irq, cpup->eq, cpup->flag);
12383 }
12384
12385 /**
12386 * lpfc_cpu_map_array_init - Initialize cpu_map structure
12387 * @phba: pointer to lpfc hba data structure.
12388 *
12389 * The routine initializes the cpu_map array structure
12390 */
12391 static void
12392 lpfc_cpu_map_array_init(struct lpfc_hba *phba)
12393 {
12394 struct lpfc_vector_map_info *cpup;
12395 struct lpfc_eq_intr_info *eqi;
12396 int cpu;
12397
12398 for_each_possible_cpu(cpu) {
12399 cpup = &phba->sli4_hba.cpu_map[cpu];
12400 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
12401 cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
12402 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
12403 cpup->eq = LPFC_VECTOR_MAP_EMPTY;
12404 cpup->flag = 0;
12405 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
12406 INIT_LIST_HEAD(&eqi->list);
12407 eqi->icnt = 0;
12408 }
12409 }
12410
12411 /**
12412 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
12413 * @phba: pointer to lpfc hba data structure.
12414 *
12415 * The routine initializes the hba_eq_hdl array structure
12416 */
12417 static void
12418 lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
12419 {
12420 struct lpfc_hba_eq_hdl *eqhdl;
12421 int i;
12422
12423 for (i = 0; i < phba->cfg_irq_chann; i++) {
12424 eqhdl = lpfc_get_eq_hdl(i);
12425 eqhdl->irq = LPFC_IRQ_EMPTY;
12426 eqhdl->phba = phba;
12427 }
12428 }
12429
12430 /**
12431 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
12432 * @phba: pointer to lpfc hba data structure.
12433 * @vectors: number of msix vectors allocated.
12434 *
12435 * The routine will figure out the CPU affinity assignment for every
12436 * MSI-X vector allocated for the HBA.
12437 * In addition, the CPU to IO channel mapping will be calculated
12438 * and the phba->sli4_hba.cpu_map array will reflect this.
12439 */
12440 static void
12441 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
12442 {
12443 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
12444 int max_phys_id, min_phys_id;
12445 int max_core_id, min_core_id;
12446 struct lpfc_vector_map_info *cpup;
12447 struct lpfc_vector_map_info *new_cpup;
12448 #ifdef CONFIG_X86
12449 struct cpuinfo_x86 *cpuinfo;
12450 #endif
12451 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12452 struct lpfc_hdwq_stat *c_stat;
12453 #endif
12454
12455 max_phys_id = 0;
12456 min_phys_id = LPFC_VECTOR_MAP_EMPTY;
12457 max_core_id = 0;
12458 min_core_id = LPFC_VECTOR_MAP_EMPTY;
12459
12460 /* Update CPU map with physical id and core id of each CPU */
12461 for_each_present_cpu(cpu) {
12462 cpup = &phba->sli4_hba.cpu_map[cpu];
12463 #ifdef CONFIG_X86
12464 cpuinfo = &cpu_data(cpu);
12465 cpup->phys_id = cpuinfo->phys_proc_id;
12466 cpup->core_id = cpuinfo->cpu_core_id;
12467 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
12468 cpup->flag |= LPFC_CPU_MAP_HYPER;
12469 #else
12470 /* No distinction between CPUs for other platforms */
12471 cpup->phys_id = 0;
12472 cpup->core_id = cpu;
12473 #endif
12474
12475 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12476 "3328 CPU %d physid %d coreid %d flag x%x\n",
12477 cpu, cpup->phys_id, cpup->core_id, cpup->flag);
12478
12479 if (cpup->phys_id > max_phys_id)
12480 max_phys_id = cpup->phys_id;
12481 if (cpup->phys_id < min_phys_id)
12482 min_phys_id = cpup->phys_id;
12483
12484 if (cpup->core_id > max_core_id)
12485 max_core_id = cpup->core_id;
12486 if (cpup->core_id < min_core_id)
12487 min_core_id = cpup->core_id;
12488 }
12489
12490 /* After looking at each irq vector assigned to this pcidev, it's
12491 * possible to see that not ALL CPUs have been accounted for.
12492 * Next we will set any unassigned (unaffinitized) cpu map
12493 * entries to an IRQ on the same phys_id.
12494 */
12495 first_cpu = cpumask_first(cpu_present_mask);
12496 start_cpu = first_cpu;
12497
12498 for_each_present_cpu(cpu) {
12499 cpup = &phba->sli4_hba.cpu_map[cpu];
12500
12501 /* Is this CPU entry unassigned */
12502 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12503 /* Mark CPU as IRQ not assigned by the kernel */
12504 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12505
12506 /* If so, find a new_cpup that is on the SAME
12507 * phys_id as cpup. start_cpu will start where we
12508 * left off so all unassigned entries don't get assigned
12509 * the IRQ of the first entry.
12510 */
12511 new_cpu = start_cpu;
12512 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12513 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12514 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12515 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
12516 (new_cpup->phys_id == cpup->phys_id))
12517 goto found_same;
12518 new_cpu = lpfc_next_present_cpu(new_cpu);
12519 }
12520 /* At this point, we leave the CPU as unassigned */
12521 continue;
12522 found_same:
12523 /* We found a matching phys_id, so copy the IRQ info */
12524 cpup->eq = new_cpup->eq;
12525
12526 /* Bump start_cpu to the next slot to minimize the
12527 * chance of having multiple unassigned CPU entries
12528 * selecting the same IRQ.
12529 */
12530 start_cpu = lpfc_next_present_cpu(new_cpu);
12531
12532 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12533 "3337 Set Affinity: CPU %d "
12534 "eq %d from peer cpu %d same "
12535 "phys_id (%d)\n",
12536 cpu, cpup->eq, new_cpu,
12537 cpup->phys_id);
12538 }
12539 }
12540
12541 /* Set any unassigned cpu map entries to an IRQ on any phys_id */
12542 start_cpu = first_cpu;
12543
12544 for_each_present_cpu(cpu) {
12545 cpup = &phba->sli4_hba.cpu_map[cpu];
12546
12547 /* Is this entry unassigned */
12548 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12549 /* Mark it as IRQ not assigned by the kernel */
12550 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12551
12552 /* If so, find a new_cpup that's on ANY phys_id
12553 * as the cpup. start_cpu will start where we
12554 * left off so all unassigned entries don't get
12555 * assigned the IRQ of the first entry.
12556 */
12557 new_cpu = start_cpu;
12558 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12559 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12560 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12561 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
12562 goto found_any;
12563 new_cpu = lpfc_next_present_cpu(new_cpu);
12564 }
12565 /* We should never leave an entry unassigned */
12566 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12567 "3339 Set Affinity: CPU %d "
12568 "eq %d UNASSIGNED\n",
12569 cpu, cpup->eq);
12570 continue;
12571 found_any:
12572 /* We found an available entry, copy the IRQ info */
12573 cpup->eq = new_cpup->eq;
12574
12575 /* Bump start_cpu to the next slot to minimize the
12576 * chance of having multiple unassigned CPU entries
12577 * selecting the same IRQ.
12578 */
12579 start_cpu = lpfc_next_present_cpu(new_cpu);
12580
12581 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12582 "3338 Set Affinity: CPU %d "
12583 "eq %d from peer cpu %d (%d/%d)\n",
12584 cpu, cpup->eq, new_cpu,
12585 new_cpup->phys_id, new_cpup->core_id);
12586 }
12587 }
12588
12589 /* Assign hdwq indices that are unique across all cpus in the map
12590 * that are also FIRST_CPUs.
12591 */
12592 idx = 0;
12593 for_each_present_cpu(cpu) {
12594 cpup = &phba->sli4_hba.cpu_map[cpu];
12595
12596 /* Only FIRST IRQs get a hdwq index assignment. */
12597 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12598 continue;
12599
12600 /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
12601 cpup->hdwq = idx;
12602 idx++;
12603 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12604 "3333 Set Affinity: CPU %d (phys %d core %d): "
12605 "hdwq %d eq %d flg x%x\n",
12606 cpu, cpup->phys_id, cpup->core_id,
12607 cpup->hdwq, cpup->eq, cpup->flag);
12608 }
12609 /* Associate a hdwq with each cpu_map entry
12610 * This will be 1 to 1 - hdwq to cpu, unless there are fewer
12611 * hardware queues than CPUs. For that case we will just round-robin
12612 * the available hardware queues as they get assigned to CPUs.
12613 * The next_idx is the idx from the FIRST_CPU loop above to account
12614 * for irq_chann < hdwq. The idx is used for round-robin assignments
12615 * and needs to start at 0.
12616 */
12617 next_idx = idx;
12618 start_cpu = 0;
12619 idx = 0;
12620 for_each_present_cpu(cpu) {
12621 cpup = &phba->sli4_hba.cpu_map[cpu];
12622
12623 /* FIRST cpus are already mapped. */
12624 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
12625 continue;
12626
12627 /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
12628 * of the unassigned cpus to the next idx so that all
12629 * hdw queues are fully utilized.
12630 */
12631 if (next_idx < phba->cfg_hdw_queue) {
12632 cpup->hdwq = next_idx;
12633 next_idx++;
12634 continue;
12635 }
12636
12637 /* Not a First CPU and all hdw_queues are used. Reuse a
12638 * Hardware Queue for another CPU, so be smart about it
12639 * and pick one that has its IRQ/EQ mapped to the same phys_id
12640 * (CPU package) and core_id.
12641 */
12642 new_cpu = start_cpu;
12643 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12644 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12645 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12646 new_cpup->phys_id == cpup->phys_id &&
12647 new_cpup->core_id == cpup->core_id) {
12648 goto found_hdwq;
12649 }
12650 new_cpu = lpfc_next_present_cpu(new_cpu);
12651 }
12652
12653 /* If we can't match both phys_id and core_id,
12654 * settle for just a phys_id match.
12655 */
12656 new_cpu = start_cpu;
12657 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12658 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12659 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12660 new_cpup->phys_id == cpup->phys_id)
12661 goto found_hdwq;
12662 new_cpu = lpfc_next_present_cpu(new_cpu);
12663 }
12664
12665 /* Otherwise just round robin on cfg_hdw_queue */
12666 cpup->hdwq = idx % phba->cfg_hdw_queue;
12667 idx++;
12668 goto logit;
12669 found_hdwq:
12670 /* We found an available entry, copy the IRQ info */
12671 start_cpu = lpfc_next_present_cpu(new_cpu);
12672 cpup->hdwq = new_cpup->hdwq;
12673 logit:
12674 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12675 "3335 Set Affinity: CPU %d (phys %d core %d): "
12676 "hdwq %d eq %d flg x%x\n",
12677 cpu, cpup->phys_id, cpup->core_id,
12678 cpup->hdwq, cpup->eq, cpup->flag);
12679 }
12680
12681 /*
12682 * Initialize the cpu_map slots for not-present cpus in case
12683 * a cpu is hot-added. Perform a simple hdwq round robin assignment.
12684 */
12685 idx = 0;
12686 for_each_possible_cpu(cpu) {
12687 cpup = &phba->sli4_hba.cpu_map[cpu];
12688 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12689 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
12690 c_stat->hdwq_no = cpup->hdwq;
12691 #endif
12692 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
12693 continue;
12694
12695 cpup->hdwq = idx++ % phba->cfg_hdw_queue;
12696 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12697 c_stat->hdwq_no = cpup->hdwq;
12698 #endif
12699 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12700 "3340 Set Affinity: not present "
12701 "CPU %d hdwq %d\n",
12702 cpu, cpup->hdwq);
12703 }
12704
12705 /* The cpu_map array will be used later during initialization
12706 * when EQ / CQ / WQs are allocated and configured.
12707 */
12708 return;
12709 }
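/*
 * Worked example on a hypothetical topology: with 8 present CPUs,
 * 4 MSI-X vectors and cfg_hdw_queue = 4, the four LPFC_CPU_FIRST_IRQ
 * CPUs take hdwq 0-3 one to one; each remaining CPU then reuses the
 * hdwq of a peer sharing its phys_id/core_id, falling back first to a
 * phys_id-only match and finally to plain round-robin
 * (idx % cfg_hdw_queue) when no such peer exists.
 */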
12710
12711 /**
12712 * lpfc_cpuhp_get_eq - Collect the EQs whose IRQ is serviced only by @cpu
12713 *
12714 * @phba: pointer to lpfc hba data structure.
12715 * @cpu: cpu going offline
12716 * @eqlist: eq list to append to
12717 */
12718 static int
12719 lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
12720 struct list_head *eqlist)
12721 {
12722 const struct cpumask *maskp;
12723 struct lpfc_queue *eq;
12724 struct cpumask *tmp;
12725 u16 idx;
12726
12727 tmp = kzalloc(cpumask_size(), GFP_KERNEL);
12728 if (!tmp)
12729 return -ENOMEM;
12730
12731 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12732 maskp = pci_irq_get_affinity(phba->pcidev, idx);
12733 if (!maskp)
12734 continue;
12735 /*
12736 * if irq is not affinitized to the cpu going offline,
12737 * then we don't need to poll the eq attached
12738 * to it.
12739 */
12740 if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
12741 continue;
12742 /* Get the cpus that are online and are affinitized
12743 * to this irq vector. If the count is more than 1
12744 * then cpuhp is not going to shut down this vector.
12745 * Since this cpu has not gone offline yet, we
12746 * need >1.
12747 */
12748 cpumask_and(tmp, maskp, cpu_online_mask);
12749 if (cpumask_weight(tmp) > 1)
12750 continue;
12751
12752 /* Now that we have an irq to shutdown, get the eq
12753 * mapped to this irq. Note: multiple hdwq's in
12754 * the software can share an eq, but eventually
12755 * only one eq will be mapped to this vector
12756 */
12757 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
12758 list_add(&eq->_poll_list, eqlist);
12759 }
12760 kfree(tmp);
12761 return 0;
12762 }
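/*
 * Illustrative walk-through of the two cpumask tests above, assuming a
 * vector affinitized to CPUs {2,3} where CPU 2 is already offline and
 * CPU 3 is the one going down: cpumask_and(tmp, maskp, cpumask_of(3))
 * is nonzero, so the vector is relevant; cpumask_and(tmp, maskp,
 * cpu_online_mask) leaves weight 1 (just CPU 3), so the vector will be
 * shut down and its EQ is queued on eqlist for polling.
 */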
12763
12764 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
12765 {
12766 if (phba->sli_rev != LPFC_SLI_REV4)
12767 return;
12768
12769 cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
12770 &phba->cpuhp);
12771 /*
12772 * unregistering the instance doesn't stop the polling
12773 * timer. Wait for the poll timer to retire.
12774 */
12775 synchronize_rcu();
12776 del_timer_sync(&phba->cpuhp_poll_timer);
12777 }
12778
12779 static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
12780 {
12781 if (phba->pport && (phba->pport->fc_flag & FC_OFFLINE_MODE))
12782 return;
12783
12784 __lpfc_cpuhp_remove(phba);
12785 }
12786
12787 static void lpfc_cpuhp_add(struct lpfc_hba *phba)
12788 {
12789 if (phba->sli_rev != LPFC_SLI_REV4)
12790 return;
12791
12792 rcu_read_lock();
12793
12794 if (!list_empty(&phba->poll_list))
12795 mod_timer(&phba->cpuhp_poll_timer,
12796 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
12797
12798 rcu_read_unlock();
12799
12800 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
12801 &phba->cpuhp);
12802 }
12803
12804 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
12805 {
12806 if (phba->pport->load_flag & FC_UNLOADING) {
12807 *retval = -EAGAIN;
12808 return true;
12809 }
12810
12811 if (phba->sli_rev != LPFC_SLI_REV4) {
12812 *retval = 0;
12813 return true;
12814 }
12815
12816 /* proceed with the hotplug */
12817 return false;
12818 }
12819
12820 /**
12821 * lpfc_irq_set_aff - set IRQ affinity
12822 * @eqhdl: EQ handle
12823 * @cpu: cpu to set affinity
12824 *
12825 **/
12826 static inline void
12827 lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
12828 {
12829 cpumask_clear(&eqhdl->aff_mask);
12830 cpumask_set_cpu(cpu, &eqhdl->aff_mask);
12831 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
12832 irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask);
12833 }
12834
12835 /**
12836 * lpfc_irq_clear_aff - clear IRQ affinity
12837 * @eqhdl: EQ handle
12838 *
12839 **/
12840 static inline void
12841 lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
12842 {
12843 cpumask_clear(&eqhdl->aff_mask);
12844 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
12845 }
12846
12847 /**
12848 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
12849 * @phba: pointer to HBA context object.
12850 * @cpu: cpu going offline/online
12851 * @offline: true, cpu is going offline. false, cpu is coming online.
12852 *
12853 * If cpu is going offline, we'll make a best effort to find the next
12854 * online cpu on the phba's original_mask and migrate all offlining IRQ
12855 * affinities.
12856 *
12857 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
12858 *
12859 * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
12860 * PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
12861 *
12862 **/
12863 static void
12864 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
12865 {
12866 struct lpfc_vector_map_info *cpup;
12867 struct cpumask *aff_mask;
12868 unsigned int cpu_select, cpu_next, idx;
12869 const struct cpumask *orig_mask;
12870
12871 if (phba->irq_chann_mode == NORMAL_MODE)
12872 return;
12873
12874 orig_mask = &phba->sli4_hba.irq_aff_mask;
12875
12876 if (!cpumask_test_cpu(cpu, orig_mask))
12877 return;
12878
12879 cpup = &phba->sli4_hba.cpu_map[cpu];
12880
12881 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12882 return;
12883
12884 if (offline) {
12885 /* Find next online CPU on original mask */
12886 cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
12887 cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
12888
12889 /* Found a valid CPU */
12890 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
12891 /* Go through each eqhdl and ensure offlining
12892 * cpu aff_mask is migrated
12893 */
12894 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12895 aff_mask = lpfc_get_aff_mask(idx);
12896
12897 /* Migrate affinity */
12898 if (cpumask_test_cpu(cpu, aff_mask))
12899 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
12900 cpu_select);
12901 }
12902 } else {
12903 /* Rely on irqbalance if no online CPUs left on NUMA */
12904 for (idx = 0; idx < phba->cfg_irq_chann; idx++)
12905 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
12906 }
12907 } else {
12908 /* Migrate affinity back to this CPU */
12909 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
12910 }
12911 }
12912
12913 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
12914 {
12915 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12916 struct lpfc_queue *eq, *next;
12917 LIST_HEAD(eqlist);
12918 int retval;
12919
12920 if (!phba) {
12921 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12922 return 0;
12923 }
12924
12925 if (__lpfc_cpuhp_checks(phba, &retval))
12926 return retval;
12927
12928 lpfc_irq_rebalance(phba, cpu, true);
12929
12930 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
12931 if (retval)
12932 return retval;
12933
12934 /* start polling on these eq's */
12935 list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
12936 list_del_init(&eq->_poll_list);
12937 lpfc_sli4_start_polling(eq);
12938 }
12939
12940 return 0;
12941 }
12942
12943 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
12944 {
12945 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12946 struct lpfc_queue *eq, *next;
12947 unsigned int n;
12948 int retval;
12949
12950 if (!phba) {
12951 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12952 return 0;
12953 }
12954
12955 if (__lpfc_cpuhp_checks(phba, &retval))
12956 return retval;
12957
12958 lpfc_irq_rebalance(phba, cpu, false);
12959
12960 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
12961 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
12962 if (n == cpu)
12963 lpfc_sli4_stop_polling(eq);
12964 }
12965
12966 return 0;
12967 }
12968
12969 /**
12970 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
12971 * @phba: pointer to lpfc hba data structure.
12972 *
12973 * This routine is invoked to enable the MSI-X interrupt vectors to device
12974 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them
12975 * to cpus on the system.
12976 *
12977 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for
12978 * the number of cpus on the same numa node as this adapter. The vectors are
12979 * allocated without requesting OS affinity mapping. A vector will be
12980 * allocated and assigned to each online and offline cpu. If the cpu is
12981 * online, then affinity will be set to that cpu. If the cpu is offline, then
12982 * affinity will be set to the nearest peer cpu within the numa node that is
12983 * online. If there are no online cpus within the numa node, affinity is not
12984 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
12985 * is consistent with the way cpu online/offline is handled when cfg_irq_numa is
12986 * configured.
12987 *
12988 * If numa mode is not enabled and there is more than 1 vector allocated, then
12989 * the driver relies on the managed irq interface where the OS assigns vector to
12990 * cpu affinity. The driver will then use that affinity mapping to setup its
12991 * cpu mapping table.
12992 *
12993 * Return codes
12994 * 0 - successful
12995 * other values - error
12996 **/
12997 static int
12998 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
12999 {
13000 int vectors, rc, index;
13001 char *name;
13002 const struct cpumask *aff_mask = NULL;
13003 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
13004 struct lpfc_vector_map_info *cpup;
13005 struct lpfc_hba_eq_hdl *eqhdl;
13006 const struct cpumask *maskp;
13007 unsigned int flags = PCI_IRQ_MSIX;
13008
13009 /* Set up MSI-X multi-message vectors */
13010 vectors = phba->cfg_irq_chann;
13011
13012 if (phba->irq_chann_mode != NORMAL_MODE)
13013 aff_mask = &phba->sli4_hba.irq_aff_mask;
13014
13015 if (aff_mask) {
13016 cpu_cnt = cpumask_weight(aff_mask);
13017 vectors = min(phba->cfg_irq_chann, cpu_cnt);
13018
13019 /* cpu: iterates over aff_mask including offline or online
13020 * cpu_select: iterates over online aff_mask to set affinity
13021 */
13022 cpu = cpumask_first(aff_mask);
13023 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
13024 } else {
13025 flags |= PCI_IRQ_AFFINITY;
13026 }
13027
13028 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
13029 if (rc < 0) {
13030 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13031 "0484 PCI enable MSI-X failed (%d)\n", rc);
13032 goto vec_fail_out;
13033 }
13034 vectors = rc;
13035
13036 /* Assign MSI-X vectors to interrupt handlers */
13037 for (index = 0; index < vectors; index++) {
13038 eqhdl = lpfc_get_eq_hdl(index);
13039 name = eqhdl->handler_name;
13040 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
13041 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
13042 LPFC_DRIVER_HANDLER_NAME"%d", index);
13043
13044 eqhdl->idx = index;
13045 rc = pci_irq_vector(phba->pcidev, index);
13046 if (rc < 0) {
13047 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13048 "0489 MSI-X fast-path (%d) "
13049 "pci_irq_vec failed (%d)\n", index, rc);
13050 goto cfg_fail_out;
13051 }
13052 eqhdl->irq = rc;
13053
13054 rc = request_threaded_irq(eqhdl->irq,
13055 &lpfc_sli4_hba_intr_handler,
13056 &lpfc_sli4_hba_intr_handler_th,
13057 0, name, eqhdl);
13058 if (rc) {
13059 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13060 "0486 MSI-X fast-path (%d) "
13061 "request_irq failed (%d)\n", index, rc);
13062 goto cfg_fail_out;
13063 }
13064
13065 if (aff_mask) {
13066 /* If found a neighboring online cpu, set affinity */
13067 if (cpu_select < nr_cpu_ids)
13068 lpfc_irq_set_aff(eqhdl, cpu_select);
13069
13070 /* Assign EQ to cpu_map */
13071 lpfc_assign_eq_map_info(phba, index,
13072 LPFC_CPU_FIRST_IRQ,
13073 cpu);
13074
13075 /* Iterate to next offline or online cpu in aff_mask */
13076 cpu = cpumask_next(cpu, aff_mask);
13077
13078 /* Find next online cpu in aff_mask to set affinity */
13079 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
13080 } else if (vectors == 1) {
13081 cpu = cpumask_first(cpu_present_mask);
13082 lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
13083 cpu);
13084 } else {
13085 maskp = pci_irq_get_affinity(phba->pcidev, index);
13086
13087 /* Loop through all CPUs associated with vector index */
13088 for_each_cpu_and(cpu, maskp, cpu_present_mask) {
13089 cpup = &phba->sli4_hba.cpu_map[cpu];
13090
13091 /* If this is the first CPU that's assigned to
13092 * this vector, set LPFC_CPU_FIRST_IRQ.
13093 *
13094 * With certain platforms it's possible that irq
13095 * vectors are affinitized to all the cpus.
13096 * This can result in each cpu_map.eq being set
13097 * to the last vector, overwriting all the
13098 * previous cpu_map.eq entries. Ensure that
13099 * each vector receives a place in cpu_map.
13100 * Later call to lpfc_cpu_affinity_check will
13101 * ensure we are nicely balanced out.
13102 */
13103 if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
13104 continue;
13105 lpfc_assign_eq_map_info(phba, index,
13106 LPFC_CPU_FIRST_IRQ,
13107 cpu);
13108 break;
13109 }
13110 }
13111 }
13112
13113 if (vectors != phba->cfg_irq_chann) {
13114 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13115 "3238 Reducing IO channels to match number of "
13116 "MSI-X vectors, requested %d got %d\n",
13117 phba->cfg_irq_chann, vectors);
13118 if (phba->cfg_irq_chann > vectors)
13119 phba->cfg_irq_chann = vectors;
13120 }
13121
13122 return rc;
13123
13124 cfg_fail_out:
13125 /* free the irq already requested */
13126 for (--index; index >= 0; index--) {
13127 eqhdl = lpfc_get_eq_hdl(index);
13128 lpfc_irq_clear_aff(eqhdl);
13129 free_irq(eqhdl->irq, eqhdl);
13130 }
13131
13132 /* Unconfigure MSI-X capability structure */
13133 pci_free_irq_vectors(phba->pcidev);
13134
13135 vec_fail_out:
13136 return rc;
13137 }
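/*
 * Note on the handler pairing above: request_threaded_irq() registers a
 * hard-IRQ handler and a threaded bottom half; the hard handler returns
 * IRQ_WAKE_THREAD when it wants the thread to run. A minimal sketch of
 * the same pattern, with hypothetical handlers:
 *
 *	static irqreturn_t my_hardirq(int irq, void *data)
 *	{
 *		return IRQ_WAKE_THREAD;		defer work to the thread
 *	}
 *	static irqreturn_t my_thread_fn(int irq, void *data)
 *	{
 *		return IRQ_HANDLED;		heavy lifting runs here
 *	}
 *	...
 *	request_threaded_irq(irq, my_hardirq, my_thread_fn, 0, name, data);
 */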
13138
13139 /**
13140 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
13141 * @phba: pointer to lpfc hba data structure.
13142 *
13143 * This routine is invoked to enable the MSI interrupt mode to device with
13144 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
13145 * called to enable the MSI vector. The device driver is responsible for
13146 * calling request_irq() to register the MSI vector with an interrupt
13147 * handler, which is done in this function.
13148 *
13149 * Return codes
13150 * 0 - successful
13151 * other values - error
13152 **/
13153 static int
13154 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
13155 {
13156 int rc, index;
13157 unsigned int cpu;
13158 struct lpfc_hba_eq_hdl *eqhdl;
13159
13160 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
13161 PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
13162 if (rc > 0)
13163 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13164 "0487 PCI enable MSI mode success.\n");
13165 else {
13166 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13167 "0488 PCI enable MSI mode failed (%d)\n", rc);
13168 return rc ? rc : -1;
13169 }
13170
13171 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13172 0, LPFC_DRIVER_NAME, phba);
13173 if (rc) {
13174 pci_free_irq_vectors(phba->pcidev);
13175 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13176 "0490 MSI request_irq failed (%d)\n", rc);
13177 return rc;
13178 }
13179
13180 eqhdl = lpfc_get_eq_hdl(0);
13181 rc = pci_irq_vector(phba->pcidev, 0);
13182 if (rc < 0) {
13183 pci_free_irq_vectors(phba->pcidev);
13184 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13185 "0496 MSI pci_irq_vec failed (%d)\n", rc);
13186 return rc;
13187 }
13188 eqhdl->irq = rc;
13189
13190 cpu = cpumask_first(cpu_present_mask);
13191 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
13192
13193 for (index = 0; index < phba->cfg_irq_chann; index++) {
13194 eqhdl = lpfc_get_eq_hdl(index);
13195 eqhdl->idx = index;
13196 }
13197
13198 return 0;
13199 }
13200
13201 /**
13202 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
13203 * @phba: pointer to lpfc hba data structure.
13204 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
13205 *
13206 * This routine is invoked to enable the device interrupt and associate the
13207 * driver's interrupt handler(s) with the interrupt vector(s) of a device
13208 * with the SLI-4 interface spec. Depending on the interrupt mode configured
13209 * for the driver, it will fall back from the configured interrupt mode to
13210 * an interrupt mode supported by the platform, kernel, and device, in the
13211 * order:
13212 * MSI-X -> MSI -> IRQ.
13213 *
13214 * Return codes
13215 * Interrupt mode (2, 1, 0) - successful
13216 * LPFC_INTR_ERROR - error
13217 **/
13218 static uint32_t
13219 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
13220 {
13221 uint32_t intr_mode = LPFC_INTR_ERROR;
13222 int retval, idx;
13223
13224 if (cfg_mode == 2) {
13225 /* Now, try to enable MSI-X interrupt mode */
13226 retval = lpfc_sli4_enable_msix(phba);
13227 if (!retval) {
13228 /* Indicate initialization to MSI-X mode */
13229 phba->intr_type = MSIX;
13230 intr_mode = 2;
13231 }
13232 }
13237
13238 /* Fallback to MSI if MSI-X initialization failed */
13239 if (cfg_mode >= 1 && phba->intr_type == NONE) {
13240 retval = lpfc_sli4_enable_msi(phba);
13241 if (!retval) {
13242 /* Indicate initialization to MSI mode */
13243 phba->intr_type = MSI;
13244 intr_mode = 1;
13245 }
13246 }
13247
13248 /* Fallback to INTx if both MSI-X/MSI initialization failed */
13249 if (phba->intr_type == NONE) {
13250 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13251 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
13252 if (!retval) {
13253 struct lpfc_hba_eq_hdl *eqhdl;
13254 unsigned int cpu;
13255
13256 /* Indicate initialization to INTx mode */
13257 phba->intr_type = INTx;
13258 intr_mode = 0;
13259
13260 eqhdl = lpfc_get_eq_hdl(0);
13261 retval = pci_irq_vector(phba->pcidev, 0);
13262 if (retval < 0) {
13263 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13264 "0502 INTR pci_irq_vec failed (%d)\n",
13265 retval);
13266 return LPFC_INTR_ERROR;
13267 }
13268 eqhdl->irq = retval;
13269
13270 cpu = cpumask_first(cpu_present_mask);
13271 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
13272 cpu);
13273 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
13274 eqhdl = lpfc_get_eq_hdl(idx);
13275 eqhdl->idx = idx;
13276 }
13277 }
13278 }
13279 return intr_mode;
13280 }
13281
13282 /**
13283 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
13284 * @phba: pointer to lpfc hba data structure.
13285 *
13286 * This routine is invoked to disable the device interrupt and disassociate
13287 * the driver's interrupt handler(s) from the interrupt vector(s) of a device
13288 * with the SLI-4 interface spec. Depending on the interrupt mode, the driver
13289 * will release the interrupt vector(s) for the message signaled interrupt.
13290 **/
13291 static void
13292 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
13293 {
13294 /* Disable the currently initialized interrupt mode */
13295 if (phba->intr_type == MSIX) {
13296 int index;
13297 struct lpfc_hba_eq_hdl *eqhdl;
13298
13299 /* Free up MSI-X multi-message vectors */
13300 for (index = 0; index < phba->cfg_irq_chann; index++) {
13301 eqhdl = lpfc_get_eq_hdl(index);
13302 lpfc_irq_clear_aff(eqhdl);
13303 free_irq(eqhdl->irq, eqhdl);
13304 }
13305 } else {
13306 free_irq(phba->pcidev->irq, phba);
13307 }
13308
13309 pci_free_irq_vectors(phba->pcidev);
13310
13311 /* Reset interrupt management states */
13312 phba->intr_type = NONE;
13313 phba->sli.slistat.sli_intr = 0;
13314 }
13315
13316 /**
13317 * lpfc_unset_hba - Unset SLI3 hba device initialization
13318 * @phba: pointer to lpfc hba data structure.
13319 *
13320 * This routine is invoked to unset the HBA device initialization steps to
13321 * a device with SLI-3 interface spec.
13322 **/
13323 static void
13324 lpfc_unset_hba(struct lpfc_hba *phba)
13325 {
13326 struct lpfc_vport *vport = phba->pport;
13327 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
13328
13329 spin_lock_irq(shost->host_lock);
13330 vport->load_flag |= FC_UNLOADING;
13331 spin_unlock_irq(shost->host_lock);
13332
13333 kfree(phba->vpi_bmask);
13334 kfree(phba->vpi_ids);
13335
13336 lpfc_stop_hba_timers(phba);
13337
13338 phba->pport->work_port_events = 0;
13339
13340 lpfc_sli_hba_down(phba);
13341
13342 lpfc_sli_brdrestart(phba);
13343
13344 lpfc_sli_disable_intr(phba);
13345
13346 return;
13347 }
13348
13349 /**
13350 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
13351 * @phba: Pointer to HBA context object.
13352 *
13353 * This function is called in the SLI4 code path to wait for completion
13354 * of the device's busy XRI exchanges. It checks the XRI exchange busy
13355 * state of outstanding FCP and ELS I/Os every 10ms for up to 10 seconds;
13356 * after that, it checks every 30 seconds, logs an error message, and
13357 * waits indefinitely. Only when all busy XRI exchanges have completed
13358 * does the driver unload proceed with invoking the function reset
13359 * mailbox command to the CNA and releasing the rest of the driver
13360 * unload resources.
13361 **/
13362 static void
13363 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
13364 {
13365 struct lpfc_sli4_hdw_queue *qp;
13366 int idx, ccnt;
13367 int wait_time = 0;
13368 int io_xri_cmpl = 1;
13369 int nvmet_xri_cmpl = 1;
13370 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13371
13372 /* Driver just aborted IOs during the hba_unset process. Pause
13373 * here to give the HBA time to complete the IO and get entries
13374 * into the abts lists.
13375 */
13376 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
13377
13378 /* Wait for NVME pending IO to flush back to transport. */
13379 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13380 lpfc_nvme_wait_for_io_drain(phba);
13381
13382 ccnt = 0;
13383 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13384 qp = &phba->sli4_hba.hdwq[idx];
13385 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
13386 if (!io_xri_cmpl) /* if list is NOT empty */
13387 ccnt++;
13388 }
13389 if (ccnt)
13390 io_xri_cmpl = 0;
13391
13392 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13393 nvmet_xri_cmpl =
13394 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13395 }
13396
13397 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
13398 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
13399 if (!nvmet_xri_cmpl)
13400 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13401 "6424 NVMET XRI exchange busy "
13402 "wait time: %d seconds.\n",
13403 wait_time/1000);
13404 if (!io_xri_cmpl)
13405 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13406 "6100 IO XRI exchange busy "
13407 "wait time: %d seconds.\n",
13408 wait_time/1000);
13409 if (!els_xri_cmpl)
13410 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13411 "2878 ELS XRI exchange busy "
13412 "wait time: %d seconds.\n",
13413 wait_time/1000);
13414 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
13415 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
13416 } else {
13417 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
13418 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
13419 }
13420
13421 ccnt = 0;
13422 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13423 qp = &phba->sli4_hba.hdwq[idx];
13424 io_xri_cmpl = list_empty(
13425 &qp->lpfc_abts_io_buf_list);
13426 if (!io_xri_cmpl) /* if list is NOT empty */
13427 ccnt++;
13428 }
13429 if (ccnt)
13430 io_xri_cmpl = 0;
13431
13432 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13433 nvmet_xri_cmpl = list_empty(
13434 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13435 }
13436 els_xri_cmpl =
13437 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13438
13439 }
13440 }
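/*
 * Illustrative timeline, assuming LPFC_XRI_EXCH_BUSY_WAIT_T1 = 10 ms,
 * LPFC_XRI_EXCH_BUSY_WAIT_TMO = 10000 ms and
 * LPFC_XRI_EXCH_BUSY_WAIT_T2 = 30000 ms (values consistent with the
 * kernel-doc above): the loop polls every 10 ms for the first 10 seconds,
 * then switches to a 30 second period, logging the accumulated wait_time
 * on each slow-period pass until all abts lists drain.
 */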
13441
13442 /**
13443 * lpfc_sli4_hba_unset - Unset the fcoe hba
13444 * @phba: Pointer to HBA context object.
13445 *
13446 * This function is called in the SLI4 code path to reset the HBA's FCoE
13447 * function. The caller is not required to hold any lock. This routine
13448 * issues a PCI function reset mailbox command to reset the FCoE function.
13449 * At the end of the function, it calls the lpfc_hba_down_post function to
13450 * free any pending commands.
13451 **/
13452 static void
13453 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
13454 {
13455 int wait_cnt = 0;
13456 LPFC_MBOXQ_t *mboxq;
13457 struct pci_dev *pdev = phba->pcidev;
13458
13459 lpfc_stop_hba_timers(phba);
13460 hrtimer_cancel(&phba->cmf_stats_timer);
13461 hrtimer_cancel(&phba->cmf_timer);
13462
13463 if (phba->pport)
13464 phba->sli4_hba.intr_enable = 0;
13465
13466 /*
13467 * Gracefully wait for any potentially outstanding asynchronous
13468 * mailbox command to complete.
13469 */
13470
13471 /* First, block any pending async mailbox command from being posted */
13472 spin_lock_irq(&phba->hbalock);
13473 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13474 spin_unlock_irq(&phba->hbalock);
13475 /* Now, try to wait it out if we can */
13476 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13477 msleep(10);
13478 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
13479 break;
13480 }
13481 /* Forcefully release the outstanding mailbox command if timed out */
13482 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13483 spin_lock_irq(&phba->hbalock);
13484 mboxq = phba->sli.mbox_active;
13485 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
13486 __lpfc_mbox_cmpl_put(phba, mboxq);
13487 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13488 phba->sli.mbox_active = NULL;
13489 spin_unlock_irq(&phba->hbalock);
13490 }
13491
13492 /* Abort all iocbs associated with the hba */
13493 lpfc_sli_hba_iocb_abort(phba);
13494
13495 if (!pci_channel_offline(phba->pcidev))
13496 /* Wait for completion of device XRI exchange busy */
13497 lpfc_sli4_xri_exchange_busy_wait(phba);
13498
13499 /* per-phba callback de-registration for hotplug event */
13500 if (phba->pport)
13501 lpfc_cpuhp_remove(phba);
13502
13503 /* Disable PCI subsystem interrupt */
13504 lpfc_sli4_disable_intr(phba);
13505
13506 /* Disable SR-IOV if enabled */
13507 if (phba->cfg_sriov_nr_virtfn)
13508 pci_disable_sriov(pdev);
13509
13510 /* The kthread stop signal shall trigger work_done one more time */
13511 kthread_stop(phba->worker_thread);
13512
13513 /* Disable FW logging to host memory */
13514 lpfc_ras_stop_fwlog(phba);
13515
13516 lpfc_sli4_queue_unset(phba);
13517
13518 /* Reset SLI4 HBA FCoE function */
13519 lpfc_pci_function_reset(phba);
13520
13521 /* release all queue allocated resources. */
13522 lpfc_sli4_queue_destroy(phba);
13523
13524 /* Free RAS DMA memory */
13525 if (phba->ras_fwlog.ras_enabled)
13526 lpfc_sli4_ras_dma_free(phba);
13527
13528 /* Stop the SLI4 device port */
13529 if (phba->pport)
13530 phba->pport->work_port_events = 0;
13531 }
13532
13533 static uint32_t
13534 lpfc_cgn_crc32(uint32_t crc, u8 byte)
13535 {
13536 uint32_t msb = 0;
13537 uint32_t bit;
13538
13539 for (bit = 0; bit < 8; bit++) {
13540 msb = (crc >> 31) & 1;
13541 crc <<= 1;
13542
13543 if (msb ^ (byte & 1)) {
13544 crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER;
13545 crc |= 1;
13546 }
13547 byte >>= 1;
13548 }
13549 return crc;
13550 }
13551
13552 static uint32_t
13553 lpfc_cgn_reverse_bits(uint32_t wd)
13554 {
13555 uint32_t result = 0;
13556 uint32_t i;
13557
13558 for (i = 0; i < 32; i++) {
13559 result <<= 1;
13560 result |= (1 & (wd >> i));
13561 }
13562 return result;
13563 }
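/*
 * Worked example: lpfc_cgn_reverse_bits() mirrors a 32-bit word end to
 * end, so 0x00000001 becomes 0x80000000 and 0x12345678 becomes
 * 0x1e6a2c48.
 */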
13564
13565 /*
13566 * This routine corresponds to the algorithm the HBA firmware
13567 * uses to validate data integrity.
13568 */
13569 uint32_t
13570 lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc)
13571 {
13572 uint32_t i;
13573 uint32_t result;
13574 uint8_t *data = (uint8_t *)ptr;
13575
13576 for (i = 0; i < byteLen; ++i)
13577 crc = lpfc_cgn_crc32(crc, data[i]);
13578
13579 result = ~lpfc_cgn_reverse_bits(crc);
13580 return result;
13581 }
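/*
 * Usage example from the congestion-buffer init routines below: the CRC
 * is computed over the whole info structure with a fixed seed and stored
 * little-endian inside the buffer itself:
 *
 *	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
 *				  LPFC_CGN_CRC32_SEED);
 *	cp->cgn_info_crc = cpu_to_le32(crc);
 */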
13582
13583 void
13584 lpfc_init_congestion_buf(struct lpfc_hba *phba)
13585 {
13586 struct lpfc_cgn_info *cp;
13587 uint16_t size;
13588 uint32_t crc;
13589
13590 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13591 "6235 INIT Congestion Buffer %p\n", phba->cgn_i);
13592
13593 if (!phba->cgn_i)
13594 return;
13595 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13596
13597 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
13598 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
13599 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
13600 atomic_set(&phba->cgn_sync_warn_cnt, 0);
13601
13602 atomic_set(&phba->cgn_driver_evt_cnt, 0);
13603 atomic_set(&phba->cgn_latency_evt_cnt, 0);
13604 atomic64_set(&phba->cgn_latency_evt, 0);
13605 phba->cgn_evt_minute = 0;
13606
13607 memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat));
13608 cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ);
13609 cp->cgn_info_version = LPFC_CGN_INFO_V4;
13610
13611 /* cgn parameters */
13612 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
13613 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
13614 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
13615 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
13616
13617 lpfc_cgn_update_tstamp(phba, &cp->base_time);
13618
13619 /* Fill in default LUN qdepth */
13620 if (phba->pport) {
13621 size = (uint16_t)(phba->pport->cfg_lun_queue_depth);
13622 cp->cgn_lunq = cpu_to_le16(size);
13623 }
13624
13625 /* last used Index initialized to 0xff already */
13626
13627 cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13628 cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13629 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
13630 cp->cgn_info_crc = cpu_to_le32(crc);
13631
13632 phba->cgn_evt_timestamp = jiffies +
13633 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
13634 }
13635
13636 void
13637 lpfc_init_congestion_stat(struct lpfc_hba *phba)
13638 {
13639 struct lpfc_cgn_info *cp;
13640 uint32_t crc;
13641
13642 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13643 "6236 INIT Congestion Stat %p\n", phba->cgn_i);
13644
13645 if (!phba->cgn_i)
13646 return;
13647
13648 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13649 memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat));
13650
13651 lpfc_cgn_update_tstamp(phba, &cp->stat_start);
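/* Re-seal the buffer: any change to the info block invalidates the
 * previous CRC, so recompute it over the full info span.
 */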
13652 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
13653 cp->cgn_info_crc = cpu_to_le32(crc);
13654 }
13655
13656 /**
13657 * __lpfc_reg_congestion_buf - register congestion info buffer with HBA
13658 * @phba: Pointer to hba context object.
13659 * @reg: flag to determine register or unregister.
13660 */
13661 static int
13662 __lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg)
13663 {
13664 struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf;
13665 union lpfc_sli4_cfg_shdr *shdr;
13666 uint32_t shdr_status, shdr_add_status;
13667 LPFC_MBOXQ_t *mboxq;
13668 int length, rc;
13669
13670 if (!phba->cgn_i)
13671 return -ENXIO;
13672
13673 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13674 if (!mboxq) {
13675 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13676 "2641 REG_CONGESTION_BUF mbox allocation fail: "
13677 "HBA state x%x reg %d\n",
13678 phba->pport->port_state, reg);
13679 return -ENOMEM;
13680 }
13681
13682 length = (sizeof(struct lpfc_mbx_reg_congestion_buf) -
13683 sizeof(struct lpfc_sli4_cfg_mhdr));
13684 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13685 LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length,
13686 LPFC_SLI4_MBX_EMBED);
13687 reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf;
13688 bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1);
13689 if (reg > 0)
13690 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1);
13691 else
13692 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0);
13693 reg_congestion_buf->length = sizeof(struct lpfc_cgn_info);
13694 reg_congestion_buf->addr_lo =
13695 putPaddrLow(phba->cgn_i->phys);
13696 reg_congestion_buf->addr_hi =
13697 putPaddrHigh(phba->cgn_i->phys);
13698
13699 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13700 shdr = (union lpfc_sli4_cfg_shdr *)
13701 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
13702 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13703 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
13704 &shdr->response);
13705 mempool_free(mboxq, phba->mbox_mem_pool);
13706 if (shdr_status || shdr_add_status || rc) {
13707 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13708 "2642 REG_CONGESTION_BUF mailbox "
13709 "failed with status x%x add_status x%x,"
13710 " mbx status x%x reg %d\n",
13711 shdr_status, shdr_add_status, rc, reg);
13712 return -ENXIO;
13713 }
13714 return 0;
13715 }
13716
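/**
 * lpfc_unreg_congestion_buf - unregister the congestion info buffer
 * @phba: Pointer to hba context object.
 *
 * Stops CMF first, then tears down the registration (see the
 * FC_UNLOADING path in lpfc_pci_remove_one_s4()).
 */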
13717 int
13718 lpfc_unreg_congestion_buf(struct lpfc_hba *phba)
13719 {
13720 lpfc_cmf_stop(phba);
13721 return __lpfc_reg_congestion_buf(phba, 0);
13722 }
13723
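/**
 * lpfc_reg_congestion_buf - register the congestion info buffer with the HBA
 * @phba: Pointer to hba context object.
 */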
13724 int
13725 lpfc_reg_congestion_buf(struct lpfc_hba *phba)
13726 {
13727 return __lpfc_reg_congestion_buf(phba, 1);
13728 }
13729
13730 /**
13731 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
13732 * @phba: Pointer to HBA context object.
13733 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
13734 *
13735 * This function is called in the SLI4 code path to read the port's
13736 * sli4 capabilities.
13737 *
13738 * This function may be called from any context that can block-wait
13739 * for the completion. The expectation is that this routine is called
13740 * typically from probe_one or from the online routine.
13741 **/
13742 int
13743 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
13744 {
13745 int rc;
13746 struct lpfc_mqe *mqe = &mboxq->u.mqe;
13747 struct lpfc_pc_sli4_params *sli4_params;
13748 uint32_t mbox_tmo;
13749 int length;
13750 bool exp_wqcq_pages = true;
13751 struct lpfc_sli4_parameters *mbx_sli4_parameters;
13752
13753 /*
13754 * By default, the driver assumes the SLI4 port requires RPI
13755 * header postings. The SLI4_PARAM response will correct this
13756 * assumption.
13757 */
13758 phba->sli4_hba.rpi_hdrs_in_use = 1;
13759
13760 /* Read the port's SLI4 Config Parameters */
13761 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
13762 sizeof(struct lpfc_sli4_cfg_mhdr));
13763 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13764 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
13765 length, LPFC_SLI4_MBX_EMBED);
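/* Poll for completion before interrupts are enabled; otherwise issue
 * the mailbox and block-wait for it with a timeout.
 */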
13766 if (!phba->sli4_hba.intr_enable)
13767 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13768 else {
13769 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
13770 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
13771 }
13772 if (unlikely(rc))
13773 return rc;
13774 sli4_params = &phba->sli4_hba.pc_sli4_params;
13775 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
13776 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
13777 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
13778 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
13779 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
13780 mbx_sli4_parameters);
13781 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
13782 mbx_sli4_parameters);
13783 if (bf_get(cfg_phwq, mbx_sli4_parameters))
13784 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
13785 else
13786 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
13787 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
13788 sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
13789 mbx_sli4_parameters);
13790 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
13791 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
13792 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
13793 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
13794 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
13795 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
13796 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
13797 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
13798 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
13799 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
13800 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
13801 mbx_sli4_parameters);
13802 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
13803 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
13804 mbx_sli4_parameters);
13805 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
13806 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
13807 sli4_params->mi_cap = bf_get(cfg_mi_ver, mbx_sli4_parameters);
13808
13809 /* Check for Extended Pre-Registered SGL support */
13810 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
13811
13812 /* Check for firmware nvme support */
13813 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
13814 bf_get(cfg_xib, mbx_sli4_parameters));
13815
13816 if (rc) {
13817 /* Save this to indicate the Firmware supports NVME */
13818 sli4_params->nvme = 1;
13819
13820 /* Firmware NVME support, check driver FC4 NVME support */
13821 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
13822 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13823 "6133 Disabling NVME support: "
13824 "FC4 type not supported: x%x\n",
13825 phba->cfg_enable_fc4_type);
13826 goto fcponly;
13827 }
13828 } else {
13829 /* No firmware NVME support, check driver FC4 NVME support */
13830 sli4_params->nvme = 0;
13831 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13832 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
13833 "6101 Disabling NVME support: Not "
13834 "supported by firmware (%d %d) x%x\n",
13835 bf_get(cfg_nvme, mbx_sli4_parameters),
13836 bf_get(cfg_xib, mbx_sli4_parameters),
13837 phba->cfg_enable_fc4_type);
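/* Entered directly or via the fcponly goto above: strip all
 * NVME-specific settings and fall back to FCP-only operation.
 */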
13838 fcponly:
13839 phba->nvmet_support = 0;
13840 phba->cfg_nvmet_mrq = 0;
13841 phba->cfg_nvme_seg_cnt = 0;
13842
13843 /* If no FC4 type support, move to just SCSI support */
13844 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
13845 return -ENODEV;
13846 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
13847 }
13848 }
13849
13850 /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
13851 * accommodate 512K and 1M IOs in a single nvme buf.
13852 */
13853 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13854 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
13855
13856 /* Enable embedded Payload BDE if support is indicated */
13857 if (bf_get(cfg_pbde, mbx_sli4_parameters))
13858 phba->cfg_enable_pbde = 1;
13859 else
13860 phba->cfg_enable_pbde = 0;
13861
13862 /*
13863 * To support Suppress Response feature we must satisfy 3 conditions.
13864 * lpfc_suppress_rsp module parameter must be set (default).
13865 * In SLI4-Parameters Descriptor:
13866 * Extended Inline Buffers (XIB) must be supported.
13867 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
13868 * (double negative).
13869 */
13870 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
13871 !(bf_get(cfg_nosr, mbx_sli4_parameters)))
13872 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
13873 else
13874 phba->cfg_suppress_rsp = 0;
13875
13876 if (bf_get(cfg_eqdr, mbx_sli4_parameters))
13877 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
13878
13879 /* Make sure that sge_supp_len can be handled by the driver */
13880 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
13881 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
13882
13883 rc = dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len);
13884 if (unlikely(rc)) {
13885 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13886 "6400 Can't set dma maximum segment size\n");
13887 return rc;
13888 }
13889
13890 /*
13891 * Check whether the adapter supports an embedded copy of the
13892 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
13893 * to use this option, 128-byte WQEs must be used.
13894 */
13895 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
13896 phba->fcp_embed_io = 1;
13897 else
13898 phba->fcp_embed_io = 0;
13899
13900 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13901 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
13902 bf_get(cfg_xib, mbx_sli4_parameters),
13903 phba->cfg_enable_pbde,
13904 phba->fcp_embed_io, sli4_params->nvme,
13905 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
13906
13907 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
13908 LPFC_SLI_INTF_IF_TYPE_2) &&
13909 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
13910 LPFC_SLI_INTF_FAMILY_LNCR_A0))
13911 exp_wqcq_pages = false;
13912
13913 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
13914 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
13915 exp_wqcq_pages &&
13916 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
13917 phba->enab_exp_wqcq_pages = 1;
13918 else
13919 phba->enab_exp_wqcq_pages = 0;
13920 /*
13921 * Check if the SLI port supports MDS Diagnostics
13922 */
13923 if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
13924 phba->mds_diags_support = 1;
13925 else
13926 phba->mds_diags_support = 0;
13927
13928 /*
13929 * Check if the SLI port supports NSLER
13930 */
13931 if (bf_get(cfg_nsler, mbx_sli4_parameters))
13932 phba->nsler = 1;
13933 else
13934 phba->nsler = 0;
13935
13936 return 0;
13937 }
13938
13939 /**
13940 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
13941 * @pdev: pointer to PCI device
13942 * @pid: pointer to PCI device identifier
13943 *
13944 * This routine is to be called to attach a device with SLI-3 interface spec
13945 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
13946 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
13947 * information of the device and driver to see if the driver states that it
13948 * can support this kind of device. If the match is successful, the driver core
13949 * invokes this routine. If this routine determines it can claim the HBA, it
13950 * does all the initialization that it needs to do to handle the HBA properly.
13951 *
13952 * Return code
13953 * 0 - driver can claim the device
13954 * negative value - driver can not claim the device
13955 **/
13956 static int
13957 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
13958 {
13959 struct lpfc_hba *phba;
13960 struct lpfc_vport *vport = NULL;
13961 struct Scsi_Host *shost = NULL;
13962 int error;
13963 uint32_t cfg_mode, intr_mode;
13964
13965 /* Allocate memory for HBA structure */
13966 phba = lpfc_hba_alloc(pdev);
13967 if (!phba)
13968 return -ENOMEM;
13969
13970 /* Perform generic PCI device enabling operation */
13971 error = lpfc_enable_pci_dev(phba);
13972 if (error)
13973 goto out_free_phba;
13974
13975 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
13976 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
13977 if (error)
13978 goto out_disable_pci_dev;
13979
13980 /* Set up SLI-3 specific device PCI memory space */
13981 error = lpfc_sli_pci_mem_setup(phba);
13982 if (error) {
13983 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13984 "1402 Failed to set up pci memory space.\n");
13985 goto out_disable_pci_dev;
13986 }
13987
13988 /* Set up SLI-3 specific device driver resources */
13989 error = lpfc_sli_driver_resource_setup(phba);
13990 if (error) {
13991 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13992 "1404 Failed to set up driver resource.\n");
13993 goto out_unset_pci_mem_s3;
13994 }
13995
13996 /* Initialize and populate the iocb list per host */
13997
13998 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
13999 if (error) {
14000 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14001 "1405 Failed to initialize iocb list.\n");
14002 goto out_unset_driver_resource_s3;
14003 }
14004
14005 /* Set up common device driver resources */
14006 error = lpfc_setup_driver_resource_phase2(phba);
14007 if (error) {
14008 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14009 "1406 Failed to set up driver resource.\n");
14010 goto out_free_iocb_list;
14011 }
14012
14013 /* Get the default values for Model Name and Description */
14014 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
14015
14016 /* Create SCSI host to the physical port */
14017 error = lpfc_create_shost(phba);
14018 if (error) {
14019 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14020 "1407 Failed to create scsi host.\n");
14021 goto out_unset_driver_resource;
14022 }
14023
14024 /* Configure sysfs attributes */
14025 vport = phba->pport;
14026 error = lpfc_alloc_sysfs_attr(vport);
14027 if (error) {
14028 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14029 "1476 Failed to allocate sysfs attr\n");
14030 goto out_destroy_shost;
14031 }
14032
14033 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
14034 /* Now, try to enable interrupts and bring up the device */
14035 cfg_mode = phba->cfg_use_msi;
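/*
 * Walk down the interrupt modes (MSI-X -> MSI -> INTx): each pass
 * enables one mode, sets up the HBA, then verifies that interrupts
 * actually fire before committing to that mode.
 */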
14036 while (true) {
14037 /* Put device to a known state before enabling interrupt */
14038 lpfc_stop_port(phba);
14039 /* Configure and enable interrupt */
14040 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
14041 if (intr_mode == LPFC_INTR_ERROR) {
14042 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14043 "0431 Failed to enable interrupt.\n");
14044 error = -ENODEV;
14045 goto out_free_sysfs_attr;
14046 }
14047 /* SLI-3 HBA setup */
14048 if (lpfc_sli_hba_setup(phba)) {
14049 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14050 "1477 Failed to set up hba\n");
14051 error = -ENODEV;
14052 goto out_remove_device;
14053 }
14054
14055 /* Wait 50ms for the interrupts of previous mailbox commands */
14056 msleep(50);
14057 /* Check active interrupts on message signaled interrupts */
14058 if (intr_mode == 0 ||
14059 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
14060 /* Log the current active interrupt mode */
14061 phba->intr_mode = intr_mode;
14062 lpfc_log_intr_mode(phba, intr_mode);
14063 break;
14064 } else {
14065 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14066 "0447 Configure interrupt mode (%d) "
14067 "failed active interrupt test.\n",
14068 intr_mode);
14069 /* Disable the current interrupt mode */
14070 lpfc_sli_disable_intr(phba);
14071 /* Try next level of interrupt mode */
14072 cfg_mode = --intr_mode;
14073 }
14074 }
14075
14076 /* Perform post initialization setup */
14077 lpfc_post_init_setup(phba);
14078
14079 /* Check if there are static vports to be created. */
14080 lpfc_create_static_vport(phba);
14081
14082 return 0;
14083
14084 out_remove_device:
14085 lpfc_unset_hba(phba);
14086 out_free_sysfs_attr:
14087 lpfc_free_sysfs_attr(vport);
14088 out_destroy_shost:
14089 lpfc_destroy_shost(phba);
14090 out_unset_driver_resource:
14091 lpfc_unset_driver_resource_phase2(phba);
14092 out_free_iocb_list:
14093 lpfc_free_iocb_list(phba);
14094 out_unset_driver_resource_s3:
14095 lpfc_sli_driver_resource_unset(phba);
14096 out_unset_pci_mem_s3:
14097 lpfc_sli_pci_mem_unset(phba);
14098 out_disable_pci_dev:
14099 lpfc_disable_pci_dev(phba);
14100 if (shost)
14101 scsi_host_put(shost);
14102 out_free_phba:
14103 lpfc_hba_free(phba);
14104 return error;
14105 }
14106
14107 /**
14108 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
14109 * @pdev: pointer to PCI device
14110 *
14111 * This routine is to be called to detach a device with SLI-3 interface
14112 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
14113 * removed from PCI bus, it performs all the necessary cleanup for the HBA
14114 * device to be removed from the PCI subsystem properly.
14115 **/
14116 static void
14117 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
14118 {
14119 struct Scsi_Host *shost = pci_get_drvdata(pdev);
14120 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
14121 struct lpfc_vport **vports;
14122 struct lpfc_hba *phba = vport->phba;
14123 int i;
14124
14125 spin_lock_irq(&phba->hbalock);
14126 vport->load_flag |= FC_UNLOADING;
14127 spin_unlock_irq(&phba->hbalock);
14128
14129 lpfc_free_sysfs_attr(vport);
14130
14131 /* Release all the vports against this physical port */
14132 vports = lpfc_create_vport_work_array(phba);
14133 if (vports != NULL)
14134 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
14135 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
14136 continue;
14137 fc_vport_terminate(vports[i]->fc_vport);
14138 }
14139 lpfc_destroy_vport_work_array(phba, vports);
14140
14141 /* Remove FC host with the physical port */
14142 fc_remove_host(shost);
14143 scsi_remove_host(shost);
14144
14145 /* Clean up all nodes, mailboxes and IOs. */
14146 lpfc_cleanup(vport);
14147
14148 /*
14149 * Bring down the SLI Layer. This step disables all interrupts,
14150 * clears the rings, discards all mailbox commands, and resets
14151 * the HBA.
14152 */
14153
14154 /* HBA interrupt will be disabled after this call */
14155 lpfc_sli_hba_down(phba);
14156 /* Stopping the kthread will trigger work_done one more time */
14157 kthread_stop(phba->worker_thread);
14158 /* Final cleanup of txcmplq and reset the HBA */
14159 lpfc_sli_brdrestart(phba);
14160
14161 kfree(phba->vpi_bmask);
14162 kfree(phba->vpi_ids);
14163
14164 lpfc_stop_hba_timers(phba);
14165 spin_lock_irq(&phba->port_list_lock);
14166 list_del_init(&vport->listentry);
14167 spin_unlock_irq(&phba->port_list_lock);
14168
14169 lpfc_debugfs_terminate(vport);
14170
14171 /* Disable SR-IOV if enabled */
14172 if (phba->cfg_sriov_nr_virtfn)
14173 pci_disable_sriov(pdev);
14174
14175 /* Disable interrupt */
14176 lpfc_sli_disable_intr(phba);
14177
14178 scsi_host_put(shost);
14179
14180 /*
14181 * Call scsi_free before mem_free since scsi bufs are released to their
14182 * corresponding pools here.
14183 */
14184 lpfc_scsi_free(phba);
14185 lpfc_free_iocb_list(phba);
14186
14187 lpfc_mem_free_all(phba);
14188
14189 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
14190 phba->hbqslimp.virt, phba->hbqslimp.phys);
14191
14192 /* Free resources associated with SLI2 interface */
14193 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
14194 phba->slim2p.virt, phba->slim2p.phys);
14195
14196 /* unmap adapter SLIM and Control Registers */
14197 iounmap(phba->ctrl_regs_memmap_p);
14198 iounmap(phba->slim_memmap_p);
14199
14200 lpfc_hba_free(phba);
14201
14202 pci_release_mem_regions(pdev);
14203 pci_disable_device(pdev);
14204 }
14205
14206 /**
14207 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
14208 * @dev_d: pointer to device
14209 *
14210 * This routine is to be called from the kernel's PCI subsystem to support
14211 * system Power Management (PM) for a device with SLI-3 interface spec. When
14212 * PM invokes this method, it quiesces the device by stopping the driver's
14213 * worker thread for the device, turning off the device's interrupt and DMA,
14214 * and bringing the device offline. Note that because the driver implements
14215 * only the minimum PM requirements for a power-aware driver, all possible
14216 * PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() method call are
14217 * treated as SUSPEND and the driver fully reinitializes its device during
14218 * the resume() method call; the driver sets the device to PCI_D3hot state
14219 * in PCI config space instead of setting it according to the @msg provided
14220 * by the PM.
14221 *
14222 * Return code
14223 * 0 - driver suspended the device
14224 * Error otherwise
14225 **/
14226 static int __maybe_unused
14227 lpfc_pci_suspend_one_s3(struct device *dev_d)
14228 {
14229 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14230 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14231
14232 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14233 "0473 PCI device Power Management suspend.\n");
14234
14235 /* Bring down the device */
14236 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14237 lpfc_offline(phba);
14238 kthread_stop(phba->worker_thread);
14239
14240 /* Disable interrupt from device */
14241 lpfc_sli_disable_intr(phba);
14242
14243 return 0;
14244 }
14245
14246 /**
14247 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
14248 * @dev_d: pointer to device
14249 *
14250 * This routine is to be called from the kernel's PCI subsystem to support
14251 * system Power Management (PM) for a device with SLI-3 interface spec. When
14252 * PM invokes this method, it restores the device's PCI config space state and
14253 * fully reinitializes the device and brings it online. Note that because the
14254 * driver implements only the minimum PM requirements for a power-aware
14255 * driver, all possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the
14256 * suspend() method call are treated as SUSPEND and the driver fully
14257 * reinitializes its device during the resume() method call; the device is
14258 * set to PCI_D0 directly in PCI config space before restoring the saved
14259 * state.
14260 *
14261 * Return code
14262 * 0 - driver resumed the device
14263 * Error otherwise
14264 **/
14265 static int __maybe_unused
14266 lpfc_pci_resume_one_s3(struct device *dev_d)
14267 {
14268 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14269 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14270 uint32_t intr_mode;
14271 int error;
14272
14273 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14274 "0452 PCI device Power Management resume.\n");
14275
14276 /* Startup the kernel thread for this host adapter. */
14277 phba->worker_thread = kthread_run(lpfc_do_work, phba,
14278 "lpfc_worker_%d", phba->brd_no);
14279 if (IS_ERR(phba->worker_thread)) {
14280 error = PTR_ERR(phba->worker_thread);
14281 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14282 "0434 PM resume failed to start worker "
14283 "thread: error=x%x.\n", error);
14284 return error;
14285 }
14286
14287 /* Init cpu_map array */
14288 lpfc_cpu_map_array_init(phba);
14289 /* Init hba_eq_hdl array */
14290 lpfc_hba_eq_hdl_array_init(phba);
14291 /* Configure and enable interrupt */
14292 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
14293 if (intr_mode == LPFC_INTR_ERROR) {
14294 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14295 "0430 PM resume Failed to enable interrupt\n");
14296 return -EIO;
14297 } else
14298 phba->intr_mode = intr_mode;
14299
14300 /* Restart HBA and bring it online */
14301 lpfc_sli_brdrestart(phba);
14302 lpfc_online(phba);
14303
14304 /* Log the current active interrupt mode */
14305 lpfc_log_intr_mode(phba, phba->intr_mode);
14306
14307 return 0;
14308 }
14309
14310 /**
14311 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
14312 * @phba: pointer to lpfc hba data structure.
14313 *
14314 * This routine is called to prepare the SLI3 device for PCI slot recover. It
14315 * aborts all the outstanding SCSI I/Os to the pci device.
14316 **/
14317 static void
14318 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
14319 {
14320 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14321 "2723 PCI channel I/O abort preparing for recovery\n");
14322
14323 /*
14324 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
14325 * and let the SCSI mid-layer retry them to recover.
14326 */
14327 lpfc_sli_abort_fcp_rings(phba);
14328 }
14329
14330 /**
14331 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
14332 * @phba: pointer to lpfc hba data structure.
14333 *
14334 * This routine is called to prepare the SLI3 device for PCI slot reset. It
14335 * disables the device interrupt and pci device, and aborts the internal FCP
14336 * pending I/Os.
14337 **/
14338 static void
14339 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
14340 {
14341 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14342 "2710 PCI channel disable preparing for reset\n");
14343
14344 /* Block any management I/Os to the device */
14345 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
14346
14347 /* Block all SCSI devices' I/Os on the host */
14348 lpfc_scsi_dev_block(phba);
14349
14350 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
14351 lpfc_sli_flush_io_rings(phba);
14352
14353 /* stop all timers */
14354 lpfc_stop_hba_timers(phba);
14355
14356 /* Disable interrupt and pci device */
14357 lpfc_sli_disable_intr(phba);
14358 pci_disable_device(phba->pcidev);
14359 }
14360
14361 /**
14362 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
14363 * @phba: pointer to lpfc hba data structure.
14364 *
14365 * This routine is called to prepare the SLI3 device for PCI slot permanently
14366 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
14367 * pending I/Os.
14368 **/
14369 static void
14370 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
14371 {
14372 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14373 "2711 PCI channel permanent disable for failure\n");
14374 /* Block all SCSI devices' I/Os on the host */
14375 lpfc_scsi_dev_block(phba);
14376 lpfc_sli4_prep_dev_for_reset(phba);
14377
14378 /* stop all timers */
14379 lpfc_stop_hba_timers(phba);
14380
14381 /* Clean up all driver's outstanding SCSI I/Os */
14382 lpfc_sli_flush_io_rings(phba);
14383 }
14384
14385 /**
14386 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
14387 * @pdev: pointer to PCI device.
14388 * @state: the current PCI connection state.
14389 *
14390 * This routine is called from the PCI subsystem for I/O error handling to
14391 * device with SLI-3 interface spec. This function is called by the PCI
14392 * subsystem after a PCI bus error affecting this device has been detected.
14393 * When this function is invoked, it will need to stop all the I/Os and
14394 * interrupt(s) to the device. Once that is done, it will return
14395 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
14396 * as desired.
14397 *
14398 * Return codes
14399 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
14400 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
14401 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
14402 **/
14403 static pci_ers_result_t
14404 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
14405 {
14406 struct Scsi_Host *shost = pci_get_drvdata(pdev);
14407 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14408
14409 switch (state) {
14410 case pci_channel_io_normal:
14411 /* Non-fatal error, prepare for recovery */
14412 lpfc_sli_prep_dev_for_recover(phba);
14413 return PCI_ERS_RESULT_CAN_RECOVER;
14414 case pci_channel_io_frozen:
14415 /* Fatal error, prepare for slot reset */
14416 lpfc_sli_prep_dev_for_reset(phba);
14417 return PCI_ERS_RESULT_NEED_RESET;
14418 case pci_channel_io_perm_failure:
14419 /* Permanent failure, prepare for device down */
14420 lpfc_sli_prep_dev_for_perm_failure(phba);
14421 return PCI_ERS_RESULT_DISCONNECT;
14422 default:
14423 /* Unknown state, prepare and request slot reset */
14424 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14425 "0472 Unknown PCI error state: x%x\n", state);
14426 lpfc_sli_prep_dev_for_reset(phba);
14427 return PCI_ERS_RESULT_NEED_RESET;
14428 }
14429 }
14430
14431 /**
14432 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
14433 * @pdev: pointer to PCI device.
14434 *
14435 * This routine is called from the PCI subsystem for error handling to
14436 * device with SLI-3 interface spec. This is called after PCI bus has been
14437 * reset to restart the PCI card from scratch, as if from a cold-boot.
14438 * During the PCI subsystem error recovery, after driver returns
14439 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
14440 * recovery and then call this routine before calling the .resume method
14441 * to recover the device. This function will initialize the HBA device and
14442 * enable interrupts, but it will leave the HBA in an offline state
14443 * without passing any I/O traffic.
14444 *
14445 * Return codes
14446 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
14447 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
14448 */
14449 static pci_ers_result_t
14450 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
14451 {
14452 struct Scsi_Host *shost = pci_get_drvdata(pdev);
14453 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14454 struct lpfc_sli *psli = &phba->sli;
14455 uint32_t intr_mode;
14456
14457 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
14458 if (pci_enable_device_mem(pdev)) {
14459 printk(KERN_ERR "lpfc: Cannot re-enable "
14460 "PCI device after reset.\n");
14461 return PCI_ERS_RESULT_DISCONNECT;
14462 }
14463
14464 pci_restore_state(pdev);
14465
14466 /*
14467 * Because pci_restore_state() clears the device's saved_state flag,
14468 * the restored state needs to be saved again.
14469 */
14470 pci_save_state(pdev);
14471
14472 if (pdev->is_busmaster)
14473 pci_set_master(pdev);
14474
14475 spin_lock_irq(&phba->hbalock);
14476 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
14477 spin_unlock_irq(&phba->hbalock);
14478
14479 /* Configure and enable interrupt */
14480 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
14481 if (intr_mode == LPFC_INTR_ERROR) {
14482 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14483 "0427 Cannot re-enable interrupt after "
14484 "slot reset.\n");
14485 return PCI_ERS_RESULT_DISCONNECT;
14486 } else
14487 phba->intr_mode = intr_mode;
14488
14489 /* Take device offline, it will perform cleanup */
14490 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14491 lpfc_offline(phba);
14492 lpfc_sli_brdrestart(phba);
14493
14494 /* Log the current active interrupt mode */
14495 lpfc_log_intr_mode(phba, phba->intr_mode);
14496
14497 return PCI_ERS_RESULT_RECOVERED;
14498 }
14499
14500 /**
14501 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
14502 * @pdev: pointer to PCI device
14503 *
14504 * This routine is called from the PCI subsystem for error handling to device
14505 * with SLI-3 interface spec. It is called when kernel error recovery tells
14506 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
14507 * error recovery. After this call, traffic can start to flow from this device
14508 * again.
14509 */
14510 static void
14511 lpfc_io_resume_s3(struct pci_dev *pdev)
14512 {
14513 struct Scsi_Host *shost = pci_get_drvdata(pdev);
14514 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14515
14516 /* Bring device online; it will be a no-op for a non-fatal error resume */
14517 lpfc_online(phba);
14518 }
14519
14520 /**
14521 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
14522 * @phba: pointer to lpfc hba data structure.
14523 *
14524 * returns the number of ELS/CT IOCBs to reserve
14525 **/
14526 int
14527 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
14528 {
14529 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
14530
14531 if (phba->sli_rev == LPFC_SLI_REV4) {
14532 if (max_xri <= 100)
14533 return 10;
14534 else if (max_xri <= 256)
14535 return 25;
14536 else if (max_xri <= 512)
14537 return 50;
14538 else if (max_xri <= 1024)
14539 return 100;
14540 else if (max_xri <= 1536)
14541 return 150;
14542 else if (max_xri <= 2048)
14543 return 200;
14544 else
14545 return 250;
14546 } else
14547 return 0;
14548 }
14549
14550 /**
14551 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
14552 * @phba: pointer to lpfc hba data structure.
14553 *
14554 * returns the number of ELS/CT + NVMET IOCBs to reserve
14555 **/
14556 int
14557 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
14558 {
14559 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
14560
14561 if (phba->nvmet_support)
14562 max_xri += LPFC_NVMET_BUF_POST;
14563 return max_xri;
14564 }
14565
14566
14567 static int
14568 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
14569 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
14570 const struct firmware *fw)
14571 {
14572 int rc;
14573 u8 sli_family;
14574
14575 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
14576 /* Three cases: (1) FW was not supported on the detected adapter.
14577 * (2) FW update has been locked out administratively.
14578 * (3) Some other error during FW update.
14579 * In each case, an unmaskable message is written to the console
14580 * for admin diagnosis.
14581 */
14582 if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
14583 (sli_family == LPFC_SLI_INTF_FAMILY_G6 &&
14584 magic_number != MAGIC_NUMBER_G6) ||
14585 (sli_family == LPFC_SLI_INTF_FAMILY_G7 &&
14586 magic_number != MAGIC_NUMBER_G7) ||
14587 (sli_family == LPFC_SLI_INTF_FAMILY_G7P &&
14588 magic_number != MAGIC_NUMBER_G7P)) {
14589 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14590 "3030 This firmware version is not supported on"
14591 " this HBA model. Device:%x Magic:%x Type:%x "
14592 "ID:%x Size %d %zd\n",
14593 phba->pcidev->device, magic_number, ftype, fid,
14594 fsize, fw->size);
14595 rc = -EINVAL;
14596 } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
14597 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14598 "3021 Firmware downloads have been prohibited "
14599 "by a system configuration setting on "
14600 "Device:%x Magic:%x Type:%x ID:%x Size %d "
14601 "%zd\n",
14602 phba->pcidev->device, magic_number, ftype, fid,
14603 fsize, fw->size);
14604 rc = -EACCES;
14605 } else {
14606 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14607 "3022 FW Download failed. Add Status x%x "
14608 "Device:%x Magic:%x Type:%x ID:%x Size %d "
14609 "%zd\n",
14610 offset, phba->pcidev->device, magic_number,
14611 ftype, fid, fsize, fw->size);
14612 rc = -EIO;
14613 }
14614 return rc;
14615 }
14616
14617 /**
14618 * lpfc_write_firmware - attempt to write a firmware image to the port
14619 * @fw: pointer to firmware image returned from request_firmware.
14620 * @context: pointer to the lpfc hba data structure (driver context).
14621 *
14622 **/
14623 static void
14624 lpfc_write_firmware(const struct firmware *fw, void *context)
14625 {
14626 struct lpfc_hba *phba = (struct lpfc_hba *)context;
14627 char fwrev[FW_REV_STR_SIZE];
14628 struct lpfc_grp_hdr *image;
14629 struct list_head dma_buffer_list;
14630 int i, rc = 0;
14631 struct lpfc_dmabuf *dmabuf, *next;
14632 uint32_t offset = 0, temp_offset = 0;
14633 uint32_t magic_number, ftype, fid, fsize;
14634
14635 /* fw can be NULL in no-wait mode; sanity check it */
14636 if (!fw) {
14637 rc = -ENXIO;
14638 goto out;
14639 }
14640 image = (struct lpfc_grp_hdr *)fw->data;
14641
14642 magic_number = be32_to_cpu(image->magic_number);
14643 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
14644 fid = bf_get_be32(lpfc_grp_hdr_id, image);
14645 fsize = be32_to_cpu(image->size);
14646
14647 INIT_LIST_HEAD(&dma_buffer_list);
14648 lpfc_decode_firmware_rev(phba, fwrev, 1);
14649 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
14650 lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI,
14651 "3023 Updating Firmware, Current Version:%s "
14652 "New Version:%s\n",
14653 fwrev, image->revision);
14654 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
14655 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
14656 GFP_KERNEL);
14657 if (!dmabuf) {
14658 rc = -ENOMEM;
14659 goto release_out;
14660 }
14661 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14662 SLI4_PAGE_SIZE,
14663 &dmabuf->phys,
14664 GFP_KERNEL);
14665 if (!dmabuf->virt) {
14666 kfree(dmabuf);
14667 rc = -ENOMEM;
14668 goto release_out;
14669 }
14670 list_add_tail(&dmabuf->list, &dma_buffer_list);
14671 }
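/*
 * Stream the image through the pre-allocated DMA pages one
 * SLI4_PAGE_SIZE chunk at a time; lpfc_wr_object() advances
 * @offset by however much the port accepted on each pass.
 */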
14672 while (offset < fw->size) {
14673 temp_offset = offset;
14674 list_for_each_entry(dmabuf, &dma_buffer_list, list) {
14675 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
14676 memcpy(dmabuf->virt,
14677 fw->data + temp_offset,
14678 fw->size - temp_offset);
14679 temp_offset = fw->size;
14680 break;
14681 }
14682 memcpy(dmabuf->virt, fw->data + temp_offset,
14683 SLI4_PAGE_SIZE);
14684 temp_offset += SLI4_PAGE_SIZE;
14685 }
14686 rc = lpfc_wr_object(phba, &dma_buffer_list,
14687 (fw->size - offset), &offset);
14688 if (rc) {
14689 rc = lpfc_log_write_firmware_error(phba, offset,
14690 magic_number,
14691 ftype,
14692 fid,
14693 fsize,
14694 fw);
14695 goto release_out;
14696 }
14697 }
14698 rc = offset;
14699 } else
14700 lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI,
14701 "3029 Skipped Firmware update, Current "
14702 "Version:%s New Version:%s\n",
14703 fwrev, image->revision);
14704
14705 release_out:
14706 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
14707 list_del(&dmabuf->list);
14708 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
14709 dmabuf->virt, dmabuf->phys);
14710 kfree(dmabuf);
14711 }
14712 release_firmware(fw);
14713 out:
14714 if (rc < 0)
14715 lpfc_log_msg(phba, KERN_ERR, LOG_INIT | LOG_SLI,
14716 "3062 Firmware update error, status %d.\n", rc);
14717 else
14718 lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI,
14719 "3024 Firmware update success: size %d.\n", rc);
14720 }
14721
14722 /**
14723 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
14724 * @phba: pointer to lpfc hba data structure.
14725 * @fw_upgrade: which firmware to update.
14726 *
14727 * This routine is called to perform a Linux generic firmware upgrade on a
14728 * device that supports this feature.
14729 **/
14730 int
14731 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
14732 {
14733 char file_name[ELX_FW_NAME_SIZE] = {0};
14734 int ret;
14735 const struct firmware *fw;
14736
14737 /* Only supported on SLI4 interface type 2 for now */
14738 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
14739 LPFC_SLI_INTF_IF_TYPE_2)
14740 return -EPERM;
14741
14742 scnprintf(file_name, sizeof(file_name), "%s.grp", phba->ModelName);
14743
14744 if (fw_upgrade == INT_FW_UPGRADE) {
14745 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
14746 file_name, &phba->pcidev->dev,
14747 GFP_KERNEL, (void *)phba,
14748 lpfc_write_firmware);
14749 } else if (fw_upgrade == RUN_FW_UPGRADE) {
14750 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
14751 if (!ret)
14752 lpfc_write_firmware(fw, (void *)phba);
14753 } else {
14754 ret = -EINVAL;
14755 }
14756
14757 return ret;
14758 }
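
/*
 * Illustrative usage (a minimal sketch, kept out of the build with #if 0):
 * the RUN_FW_UPGRADE path performs a synchronous request_firmware() and is
 * typically driven from user space. Assuming a caller that already holds a
 * valid phba:
 */
#if 0
	if (lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE))
		dev_warn(&phba->pcidev->dev,
			 "firmware update request was not started\n");
#endif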
14759
14760 /**
14761 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
14762 * @pdev: pointer to PCI device
14763 * @pid: pointer to PCI device identifier
14764 *
14765 * This routine is called from the kernel's PCI subsystem to attach a device
14766 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
14767 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
14768 * information of the device and driver to see if the driver states that it
14769 * can support this kind of device. If the match is successful, the driver
14770 * core invokes this routine. If this routine determines it can claim the HBA,
14771 * it does all the initialization that it needs to do to handle the HBA
14772 * properly.
14773 *
14774 * Return code
14775 * 0 - driver can claim the device
14776 * negative value - driver can not claim the device
14777 **/
14778 static int
14779 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
14780 {
14781 struct lpfc_hba *phba;
14782 struct lpfc_vport *vport = NULL;
14783 struct Scsi_Host *shost = NULL;
14784 int error;
14785 uint32_t cfg_mode, intr_mode;
14786
14787 /* Allocate memory for HBA structure */
14788 phba = lpfc_hba_alloc(pdev);
14789 if (!phba)
14790 return -ENOMEM;
14791
14792 INIT_LIST_HEAD(&phba->poll_list);
14793
14794 /* Perform generic PCI device enabling operation */
14795 error = lpfc_enable_pci_dev(phba);
14796 if (error)
14797 goto out_free_phba;
14798
14799 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
14800 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
14801 if (error)
14802 goto out_disable_pci_dev;
14803
14804 /* Set up SLI-4 specific device PCI memory space */
14805 error = lpfc_sli4_pci_mem_setup(phba);
14806 if (error) {
14807 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14808 "1410 Failed to set up pci memory space.\n");
14809 goto out_disable_pci_dev;
14810 }
14811
14812 /* Set up SLI-4 Specific device driver resources */
14813 error = lpfc_sli4_driver_resource_setup(phba);
14814 if (error) {
14815 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14816 "1412 Failed to set up driver resource.\n");
14817 goto out_unset_pci_mem_s4;
14818 }
14819
14820 INIT_LIST_HEAD(&phba->active_rrq_list);
14821 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
14822
14823 /* Set up common device driver resources */
14824 error = lpfc_setup_driver_resource_phase2(phba);
14825 if (error) {
14826 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14827 "1414 Failed to set up driver resource.\n");
14828 goto out_unset_driver_resource_s4;
14829 }
14830
14831 /* Get the default values for Model Name and Description */
14832 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
14833
14834 /* Now, trying to enable interrupt and bring up the device */
14835 cfg_mode = phba->cfg_use_msi;
14836
14837 /* Put device to a known state before enabling interrupt */
14838 phba->pport = NULL;
14839 lpfc_stop_port(phba);
14840
14841 /* Init cpu_map array */
14842 lpfc_cpu_map_array_init(phba);
14843
14844 /* Init hba_eq_hdl array */
14845 lpfc_hba_eq_hdl_array_init(phba);
14846
14847 /* Configure and enable interrupt */
14848 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
14849 if (intr_mode == LPFC_INTR_ERROR) {
14850 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14851 "0426 Failed to enable interrupt.\n");
14852 error = -ENODEV;
14853 goto out_unset_driver_resource;
14854 }
14855 /* Default to single EQ for non-MSI-X */
14856 if (phba->intr_type != MSIX) {
14857 phba->cfg_irq_chann = 1;
14858 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14859 if (phba->nvmet_support)
14860 phba->cfg_nvmet_mrq = 1;
14861 }
14862 }
14863 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
14864
14865 /* Create SCSI host to the physical port */
14866 error = lpfc_create_shost(phba);
14867 if (error) {
14868 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14869 "1415 Failed to create scsi host.\n");
14870 goto out_disable_intr;
14871 }
14872 vport = phba->pport;
14873 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
14874
14875 /* Configure sysfs attributes */
14876 error = lpfc_alloc_sysfs_attr(vport);
14877 if (error) {
14878 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14879 "1416 Failed to allocate sysfs attr\n");
14880 goto out_destroy_shost;
14881 }
14882
14883 /* Set up SLI-4 HBA */
14884 if (lpfc_sli4_hba_setup(phba)) {
14885 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14886 "1421 Failed to set up hba\n");
14887 error = -ENODEV;
14888 goto out_free_sysfs_attr;
14889 }
14890
14891 /* Log the current active interrupt mode */
14892 phba->intr_mode = intr_mode;
14893 lpfc_log_intr_mode(phba, intr_mode);
14894
14895 /* Perform post initialization setup */
14896 lpfc_post_init_setup(phba);
14897
14898 /* FW NVME support, detected earlier in the driver load, already
14899 * corrected the FC4 type, so a separate nvme_support check is unnecessary.
14900 */
14901 if (phba->nvmet_support == 0) {
14902 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14903 /* Create NVME binding with nvme_fc_transport. This
14904 * ensures the vport is initialized. If the localport
14905 * create fails, the driver is not unloaded, so that
14906 * field issues can still be diagnosed.
14907 */
14908 error = lpfc_nvme_create_localport(vport);
14909 if (error) {
14910 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14911 "6004 NVME registration "
14912 "failed, error x%x\n",
14913 error);
14914 }
14915 }
14916 }
14917
14918 /* check for firmware upgrade or downgrade */
14919 if (phba->cfg_request_firmware_upgrade)
14920 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
14921
14922 /* Check if there are static vports to be created. */
14923 lpfc_create_static_vport(phba);
14924
14925 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
14926 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
14927
14928 return 0;
14929
14930 out_free_sysfs_attr:
14931 lpfc_free_sysfs_attr(vport);
14932 out_destroy_shost:
14933 lpfc_destroy_shost(phba);
14934 out_disable_intr:
14935 lpfc_sli4_disable_intr(phba);
14936 out_unset_driver_resource:
14937 lpfc_unset_driver_resource_phase2(phba);
14938 out_unset_driver_resource_s4:
14939 lpfc_sli4_driver_resource_unset(phba);
14940 out_unset_pci_mem_s4:
14941 lpfc_sli4_pci_mem_unset(phba);
14942 out_disable_pci_dev:
14943 lpfc_disable_pci_dev(phba);
14944 if (shost)
14945 scsi_host_put(shost);
14946 out_free_phba:
14947 lpfc_hba_free(phba);
14948 return error;
14949 }
14950
14951 /**
14952 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
14953 * @pdev: pointer to PCI device
14954 *
14955 * This routine is called from the kernel's PCI subsystem to detach a device
14956 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
14957 * removed from PCI bus, it performs all the necessary cleanup for the HBA
14958 * device to be removed from the PCI subsystem properly.
14959 **/
14960 static void
14961 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
14962 {
14963 struct Scsi_Host *shost = pci_get_drvdata(pdev);
14964 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
14965 struct lpfc_vport **vports;
14966 struct lpfc_hba *phba = vport->phba;
14967 int i;
14968
14969 /* Mark the device unloading flag */
14970 spin_lock_irq(&phba->hbalock);
14971 vport->load_flag |= FC_UNLOADING;
14972 spin_unlock_irq(&phba->hbalock);
14973 if (phba->cgn_i)
14974 lpfc_unreg_congestion_buf(phba);
14975
14976 lpfc_free_sysfs_attr(vport);
14977
14978 /* Release all the vports against this physical port */
14979 vports = lpfc_create_vport_work_array(phba);
14980 if (vports != NULL)
14981 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
14982 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
14983 continue;
14984 fc_vport_terminate(vports[i]->fc_vport);
14985 }
14986 lpfc_destroy_vport_work_array(phba, vports);
14987
14988 /* Remove FC host with the physical port */
14989 fc_remove_host(shost);
14990 scsi_remove_host(shost);
14991
14992 /* Perform ndlp cleanup on the physical port. The nvme and nvmet
14993 * localports are destroyed afterwards to clean up all transport memory.
14994 */
14995 lpfc_cleanup(vport);
14996 lpfc_nvmet_destroy_targetport(phba);
14997 lpfc_nvme_destroy_localport(vport);
14998
14999 /* De-allocate multi-XRI pools */
15000 if (phba->cfg_xri_rebalancing)
15001 lpfc_destroy_multixri_pools(phba);
15002
15003 /*
15004 * Bring down the SLI Layer. This step disables all interrupts,
15005 * clears the rings, discards all mailbox commands, and resets
15006 * the HBA FCoE function.
15007 */
15008 lpfc_debugfs_terminate(vport);
15009
15010 lpfc_stop_hba_timers(phba);
15011 spin_lock_irq(&phba->port_list_lock);
15012 list_del_init(&vport->listentry);
15013 spin_unlock_irq(&phba->port_list_lock);
15014
15015 /* Perform scsi free before driver resource_unset since scsi
15016 * buffers are released to their corresponding pools here.
15017 */
15018 lpfc_io_free(phba);
15019 lpfc_free_iocb_list(phba);
15020 lpfc_sli4_hba_unset(phba);
15021
15022 lpfc_unset_driver_resource_phase2(phba);
15023 lpfc_sli4_driver_resource_unset(phba);
15024
15025 /* Unmap adapter Control and Doorbell registers */
15026 lpfc_sli4_pci_mem_unset(phba);
15027
15028 /* Release PCI resources and disable device's PCI function */
15029 scsi_host_put(shost);
15030 lpfc_disable_pci_dev(phba);
15031
15032 /* Finally, free the driver's device data structure */
15033 lpfc_hba_free(phba);
15034
15035 return;
15036 }
15037
15038 /**
15039 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
15040 * @dev_d: pointer to device
15041 *
15042 * This routine is called from the kernel's PCI subsystem to support system
15043 * Power Management (PM) for a device with SLI-4 interface spec. When PM
15044 * invokes this method, it quiesces the device by stopping the driver's
15045 * worker thread for the device, turning off the device's interrupt and DMA,
15046 * and bringing the device offline. Note that because the driver implements
15047 * only the minimum PM requirements for a power-aware driver, all possible
15048 * PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() method call are
15049 * treated as SUSPEND and the driver fully reinitializes its device during
15050 * the resume() method call; the driver sets the device to PCI_D3hot state
15051 * in PCI config space instead of setting it according to the @msg provided
15052 * by the PM.
15053 *
15054 * Return code
15055 * 0 - driver suspended the device
15056 * Error otherwise
15057 **/
15058 static int __maybe_unused
15059 lpfc_pci_suspend_one_s4(struct device *dev_d)
15060 {
15061 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
15062 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15063
15064 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15065 "2843 PCI device Power Management suspend.\n");
15066
15067 /* Bring down the device */
15068 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
15069 lpfc_offline(phba);
15070 kthread_stop(phba->worker_thread);
15071
15072 /* Disable interrupt from device */
15073 lpfc_sli4_disable_intr(phba);
15074 lpfc_sli4_queue_destroy(phba);
15075
15076 return 0;
15077 }
15078
15079 /**
15080 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
15081 * @dev_d: pointer to device
15082 *
15083 * This routine is called from the kernel's PCI subsystem to support system
15084 * Power Management (PM) for a device with SLI-4 interface spec. When PM
15085 * invokes this method, it restores the device's PCI config space state and
15086 * fully reinitializes the device and brings it online. Note that because the
15087 * driver implements only the minimum PM requirements for a power-aware
15088 * driver, all possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the
15089 * suspend() method call are treated as SUSPEND and the driver fully
15090 * reinitializes its device during the resume() method call; the device is
15091 * set to PCI_D0 directly in PCI config space before restoring the saved
15092 * state.
15093 *
15094 * Return code
15095 * 0 - driver resumed the device
15096 * Error otherwise
15097 **/
15098 static int __maybe_unused
15099 lpfc_pci_resume_one_s4(struct device *dev_d)
15100 {
15101 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
15102 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15103 uint32_t intr_mode;
15104 int error;
15105
15106 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15107 "0292 PCI device Power Management resume.\n");
15108
15109 /* Startup the kernel thread for this host adapter. */
15110 phba->worker_thread = kthread_run(lpfc_do_work, phba,
15111 "lpfc_worker_%d", phba->brd_no);
15112 if (IS_ERR(phba->worker_thread)) {
15113 error = PTR_ERR(phba->worker_thread);
15114 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15115 "0293 PM resume failed to start worker "
15116 "thread: error=x%x.\n", error);
15117 return error;
15118 }
15119
15120 /* Configure and enable interrupt */
15121 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
15122 if (intr_mode == LPFC_INTR_ERROR) {
15123 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15124 "0294 PM resume Failed to enable interrupt\n");
15125 return -EIO;
15126 } else
15127 phba->intr_mode = intr_mode;
15128
15129 /* Restart HBA and bring it online */
15130 lpfc_sli_brdrestart(phba);
15131 lpfc_online(phba);
15132
15133 /* Log the current active interrupt mode */
15134 lpfc_log_intr_mode(phba, phba->intr_mode);
15135
15136 return 0;
15137 }
15138
15139 /**
15140 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recovery
15141 * @phba: pointer to lpfc hba data structure.
15142 *
15143 * This routine is called to prepare the SLI4 device for PCI slot recovery.
15144 * It aborts all the outstanding SCSI I/Os to the PCI device.
15145 **/
15146 static void
15147 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
15148 {
15149 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15150 "2828 PCI channel I/O abort preparing for recovery\n");
15151 /*
15152 * There may be errored I/Os through the HBA; abort all I/Os on the
15153 * txcmplq and let the SCSI mid-layer retry them to recover.
15154 */
15155 lpfc_sli_abort_fcp_rings(phba);
15156 }
15157
15158 /**
15159 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
15160 * @phba: pointer to lpfc hba data structure.
15161 *
15162 * This routine is called to prepare the SLI4 device for a PCI slot reset. It
15163 * disables the device interrupt and the PCI device, and aborts the internal
15164 * pending FCP I/Os.
15165 **/
15166 static void
15167 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
15168 {
15169 int offline = pci_channel_offline(phba->pcidev);
15170
15171 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15172 "2826 PCI channel disable preparing for reset offline"
15173 " %d\n", offline);
15174
15175 /* Block any management I/Os to the device */
15176 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
15177
15178
15179 /* HBA_PCI_ERR was set in io_error_detect */
15180 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
15181 /* Flush all driver's outstanding I/Os as we are to reset */
15182 lpfc_sli_flush_io_rings(phba);
15183 lpfc_offline(phba);
15184
15185 /* stop all timers */
15186 lpfc_stop_hba_timers(phba);
15187
15188 lpfc_sli4_queue_destroy(phba);
15189 /* Disable interrupt and pci device */
15190 lpfc_sli4_disable_intr(phba);
15191 pci_disable_device(phba->pcidev);
15192 }
15193
15194 /**
15195 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
15196 * @phba: pointer to lpfc hba data structure.
15197 *
15198 * This routine is called to prepare the SLI4 device for the PCI slot being
15199 * permanently disabled. It blocks SCSI transport layer traffic and flushes
15200 * the pending FCP I/Os.
15201 **/
15202 static void
15203 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
15204 {
15205 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15206 "2827 PCI channel permanent disable for failure\n");
15207
15208 /* Block all SCSI devices' I/Os on the host */
15209 lpfc_scsi_dev_block(phba);
15210
15211 /* stop all timers */
15212 lpfc_stop_hba_timers(phba);
15213
15214 /* Clean up all driver's outstanding I/Os */
15215 lpfc_sli_flush_io_rings(phba);
15216 }
15217
15218 /**
15219 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
15220 * @pdev: pointer to PCI device.
15221 * @state: the current PCI connection state.
15222 *
15223 * This routine is called from the PCI subsystem for error handling on devices
15224 * with the SLI-4 interface spec. It is called by the PCI subsystem after a
15225 * PCI bus error affecting this device has been detected. When this function
15226 * is invoked, it stops all I/O and interrupts to the device. Once that is
15227 * done, it returns PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to
15228 * perform proper recovery as desired.
15229 *
15230 * Return codes
15231 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
15232 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15233 **/
15234 static pci_ers_result_t
15235 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
15236 {
15237 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15238 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15239 bool hba_pci_err;
15240
15241 switch (state) {
15242 case pci_channel_io_normal:
15243 /* Non-fatal error, prepare for recovery */
15244 lpfc_sli4_prep_dev_for_recover(phba);
15245 return PCI_ERS_RESULT_CAN_RECOVER;
15246 case pci_channel_io_frozen:
15247 hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
15248 /* Fatal error, prepare for slot reset */
15249 if (!hba_pci_err)
15250 lpfc_sli4_prep_dev_for_reset(phba);
15251 else
15252 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15253 "2832 Already handling PCI error "
15254 "state: x%x\n", state);
15255 return PCI_ERS_RESULT_NEED_RESET;
15256 case pci_channel_io_perm_failure:
15257 set_bit(HBA_PCI_ERR, &phba->bit_flags);
15258 /* Permanent failure, prepare for device down */
15259 lpfc_sli4_prep_dev_for_perm_failure(phba);
15260 return PCI_ERS_RESULT_DISCONNECT;
15261 default:
15262 hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
15263 if (!hba_pci_err)
15264 lpfc_sli4_prep_dev_for_reset(phba);
15265 /* Unknown state, prepare and request slot reset */
15266 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15267 "2825 Unknown PCI error state: x%x\n", state);
15269 return PCI_ERS_RESULT_NEED_RESET;
15270 }
15271 }
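
/*
 * For illustration: the routine above is one leg of the PCI error recovery
 * sequence. On a frozen channel the PCI core calls .error_detected(); a
 * PCI_ERS_RESULT_NEED_RESET return makes it reset the slot, then call
 * .slot_reset() and finally .resume(). A minimal sketch of that callback
 * trio for a hypothetical driver (the foo_* names are illustrative, not
 * lpfc code):
 */
#if 0	/* illustrative sketch, compiled out */
static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
					   pci_channel_state_t state)
{
	/* Quiesce I/O and interrupts, then ask for a slot reset */
	return (state == pci_channel_io_perm_failure) ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t foo_slot_reset(struct pci_dev *pdev)
{
	/* Re-enable the device from scratch after the bus reset */
	if (pci_enable_device_mem(pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	pci_restore_state(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void foo_resume(struct pci_dev *pdev)
{
	/* Restart normal I/O traffic */
}

static const struct pci_error_handlers foo_err_handler = {
	.error_detected	= foo_error_detected,
	.slot_reset	= foo_slot_reset,
	.resume		= foo_resume,
};
#endif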
15272
15273 /**
15274 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
15275 * @pdev: pointer to PCI device.
15276 *
15277 * This routine is called from the PCI subsystem for error handling on devices
15278 * with the SLI-4 interface spec. It is called after the PCI bus has been
15279 * reset to restart the PCI card from scratch, as if from a cold-boot. During
15280 * the PCI subsystem error recovery, after the driver returns
15281 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem performs proper error
15282 * recovery and then calls this routine before calling the .resume method to
15283 * recover the device. This function initializes the HBA device and enables
15284 * its interrupt, but it leaves the HBA in the offline state without passing
15285 * any I/O traffic.
15286 *
15287 * Return codes
15288 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
15289 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15290 */
15291 static pci_ers_result_t
15292 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
15293 {
15294 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15295 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15296 struct lpfc_sli *psli = &phba->sli;
15297 uint32_t intr_mode;
15298 bool hba_pci_err;
15299
15300 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
15301 if (pci_enable_device_mem(pdev)) {
15302 printk(KERN_ERR "lpfc: Cannot re-enable "
15303 "PCI device after reset.\n");
15304 return PCI_ERS_RESULT_DISCONNECT;
15305 }
15306
15307 pci_restore_state(pdev);
15308
15309 hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags);
15310 if (!hba_pci_err)
15311 dev_info(&pdev->dev,
15312 "hba_pci_err was not set, recovering slot reset.\n");
15313 /*
15314 * pci_restore_state() clears the device's saved_state flag, so the
15315 * restored state must be saved again for any later restore.
15316 */
15317 pci_save_state(pdev);
15318
15319 if (pdev->is_busmaster)
15320 pci_set_master(pdev);
15321
15322 spin_lock_irq(&phba->hbalock);
15323 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
15324 spin_unlock_irq(&phba->hbalock);
15325
15326 /* Init cpu_map array */
15327 lpfc_cpu_map_array_init(phba);
15328 /* Configure and enable interrupt */
15329 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
15330 if (intr_mode == LPFC_INTR_ERROR) {
15331 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15332 "2824 Cannot re-enable interrupt after "
15333 "slot reset.\n");
15334 return PCI_ERS_RESULT_DISCONNECT;
15335 } else
15336 phba->intr_mode = intr_mode;
15337 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
15338
15339 /* Log the current active interrupt mode */
15340 lpfc_log_intr_mode(phba, phba->intr_mode);
15341
15342 return PCI_ERS_RESULT_RECOVERED;
15343 }
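
/*
 * For illustration: pci_restore_state() consumes the state captured by a
 * prior pci_save_state() and clears the device's state_saved flag, which is
 * why the routine above saves the state again right after restoring it. A
 * condensed sketch of the re-enable sequence (foo_* naming is hypothetical):
 */
#if 0	/* illustrative sketch, compiled out */
static int foo_reinit_after_slot_reset(struct pci_dev *pdev)
{
	int rc;

	rc = pci_enable_device_mem(pdev);	/* MMIO BARs only */
	if (rc)
		return rc;
	pci_restore_state(pdev);		/* clears pdev->state_saved */
	pci_save_state(pdev);			/* re-arm for a later restore */
	if (pdev->is_busmaster)
		pci_set_master(pdev);
	return 0;
}
#endif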
15344
15345 /**
15346 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
15347 * @pdev: pointer to PCI device
15348 *
15349 * This routine is called from the PCI subsystem for error handling on devices
15350 * with the SLI-4 interface spec. It is called when the kernel error recovery
15351 * tells the lpfc driver that it is OK to resume normal PCI operation after
15352 * PCI bus error recovery. After this call, traffic can start to flow from
15353 * this device again.
15354 **/
15355 static void
15356 lpfc_io_resume_s4(struct pci_dev *pdev)
15357 {
15358 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15359 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15360
15361 /*
15362 * After a slot reset, the function reset is performed through a mailbox
15363 * command, which requires DMA to be enabled, so the operation has to be
15364 * deferred to this io_resume phase. Taking the device offline will
15365 * perform the necessary cleanup.
15366 */
15367 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
15368 /* Perform device reset */
15369 lpfc_sli_brdrestart(phba);
15370 /* Bring the device back online */
15371 lpfc_online(phba);
15372 }
15373 }
15374
15375 /**
15376 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
15377 * @pdev: pointer to PCI device
15378 * @pid: pointer to PCI device identifier
15379 *
15380 * This routine is to be registered to the kernel's PCI subsystem. When an
15381 * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
15382 * compares the device-specific information against the IDs the driver claims
15383 * to support to see whether the driver can handle this kind of device. If
15384 * the match is successful, the driver core invokes this routine. This routine
15385 * dispatches the action to the proper SLI-3 or SLI-4 device probing routine,
15386 * which will do all the initialization that it needs to do to handle the HBA
15387 * device properly.
15388 *
15389 * Return code
15390 * 0 - driver can claim the device
15391 * negative value - driver can not claim the device
15392 **/
15393 static int
15394 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
15395 {
15396 int rc;
15397 struct lpfc_sli_intf intf;
15398
15399 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
15400 return -ENODEV;
15401
15402 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
15403 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
15404 rc = lpfc_pci_probe_one_s4(pdev, pid);
15405 else
15406 rc = lpfc_pci_probe_one_s3(pdev, pid);
15407
15408 return rc;
15409 }
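
/*
 * For illustration: the bf_get() accessors used above are shift-and-mask
 * helpers over the LPFC_SLI_INTF register word read via
 * pci_read_config_dword(). A generic sketch of the pattern; the shift and
 * mask values below are made up for illustration and are not the real
 * LPFC_SLI_INTF layout:
 */
#if 0	/* illustrative sketch, compiled out */
#define EX_SLIREV_SHIFT	4
#define EX_SLIREV_MASK	0x0000000f

static inline u32 ex_get_slirev(u32 word0)
{
	return (word0 >> EX_SLIREV_SHIFT) & EX_SLIREV_MASK;
}
#endif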
15410
15411 /**
15412 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
15413 * @pdev: pointer to PCI device
15414 *
15415 * This routine is to be registered to the kernel's PCI subsystem. When an
15416 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
15417 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
15418 * remove routine, which will perform all the necessary cleanup for the
15419 * device to be removed from the PCI subsystem properly.
15420 **/
15421 static void
15422 lpfc_pci_remove_one(struct pci_dev *pdev)
15423 {
15424 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15425 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15426
15427 switch (phba->pci_dev_grp) {
15428 case LPFC_PCI_DEV_LP:
15429 lpfc_pci_remove_one_s3(pdev);
15430 break;
15431 case LPFC_PCI_DEV_OC:
15432 lpfc_pci_remove_one_s4(pdev);
15433 break;
15434 default:
15435 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15436 "1424 Invalid PCI device group: 0x%x\n",
15437 phba->pci_dev_grp);
15438 break;
15439 }
15440 return;
15441 }
15442
15443 /**
15444 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
15445 * @dev: pointer to device
15446 *
15447 * This routine is to be registered to the kernel's PCI subsystem to support
15448 * system Power Management (PM). When PM invokes this method, it dispatches
15449 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
15450 * suspend the device.
15451 *
15452 * Return code
15453 * 0 - driver suspended the device
15454 * Error otherwise
15455 **/
15456 static int __maybe_unused
15457 lpfc_pci_suspend_one(struct device *dev)
15458 {
15459 struct Scsi_Host *shost = dev_get_drvdata(dev);
15460 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15461 int rc = -ENODEV;
15462
15463 switch (phba->pci_dev_grp) {
15464 case LPFC_PCI_DEV_LP:
15465 rc = lpfc_pci_suspend_one_s3(dev);
15466 break;
15467 case LPFC_PCI_DEV_OC:
15468 rc = lpfc_pci_suspend_one_s4(dev);
15469 break;
15470 default:
15471 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15472 "1425 Invalid PCI device group: 0x%x\n",
15473 phba->pci_dev_grp);
15474 break;
15475 }
15476 return rc;
15477 }
15478
15479 /**
15480 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
15481 * @dev: pointer to device
15482 *
15483 * This routine is to be registered to the kernel's PCI subsystem to support
15484 * system Power Management (PM). When PM invokes this method, it dispatches
15485 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
15486 * resume the device.
15487 *
15488 * Return code
15489 * 0 - driver resumed the device
15490 * Error otherwise
15491 **/
15492 static int __maybe_unused
15493 lpfc_pci_resume_one(struct device *dev)
15494 {
15495 struct Scsi_Host *shost = dev_get_drvdata(dev);
15496 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15497 int rc = -ENODEV;
15498
15499 switch (phba->pci_dev_grp) {
15500 case LPFC_PCI_DEV_LP:
15501 rc = lpfc_pci_resume_one_s3(dev);
15502 break;
15503 case LPFC_PCI_DEV_OC:
15504 rc = lpfc_pci_resume_one_s4(dev);
15505 break;
15506 default:
15507 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15508 "1426 Invalid PCI device group: 0x%x\n",
15509 phba->pci_dev_grp);
15510 break;
15511 }
15512 return rc;
15513 }
15514
15515 /**
15516 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
15517 * @pdev: pointer to PCI device.
15518 * @state: the current PCI connection state.
15519 *
15520 * This routine is registered to the PCI subsystem for error handling. This
15521 * function is called by the PCI subsystem after a PCI bus error affecting
15522 * this device has been detected. When this routine is invoked, it dispatches
15523 * the action to the proper SLI-3 or SLI-4 device error detected handling
15524 * routine, which will perform the proper error detected operation.
15525 *
15526 * Return codes
15527 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
15528 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15529 **/
15530 static pci_ers_result_t
15531 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
15532 {
15533 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15534 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15535 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15536
15537 if (phba->link_state == LPFC_HBA_ERROR &&
15538 phba->hba_flag & HBA_IOQ_FLUSH)
15539 return PCI_ERS_RESULT_NEED_RESET;
15540
15541 switch (phba->pci_dev_grp) {
15542 case LPFC_PCI_DEV_LP:
15543 rc = lpfc_io_error_detected_s3(pdev, state);
15544 break;
15545 case LPFC_PCI_DEV_OC:
15546 rc = lpfc_io_error_detected_s4(pdev, state);
15547 break;
15548 default:
15549 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15550 "1427 Invalid PCI device group: 0x%x\n",
15551 phba->pci_dev_grp);
15552 break;
15553 }
15554 return rc;
15555 }
15556
15557 /**
15558 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
15559 * @pdev: pointer to PCI device.
15560 *
15561 * This routine is registered to the PCI subsystem for error handling. This
15562 * function is called after PCI bus has been reset to restart the PCI card
15563 * from scratch, as if from a cold-boot. When this routine is invoked, it
15564 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
15565 * routine, which will perform the proper device reset.
15566 *
15567 * Return codes
15568 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
15569 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15570 **/
15571 static pci_ers_result_t
15572 lpfc_io_slot_reset(struct pci_dev *pdev)
15573 {
15574 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15575 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15576 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15577
15578 switch (phba->pci_dev_grp) {
15579 case LPFC_PCI_DEV_LP:
15580 rc = lpfc_io_slot_reset_s3(pdev);
15581 break;
15582 case LPFC_PCI_DEV_OC:
15583 rc = lpfc_io_slot_reset_s4(pdev);
15584 break;
15585 default:
15586 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15587 "1428 Invalid PCI device group: 0x%x\n",
15588 phba->pci_dev_grp);
15589 break;
15590 }
15591 return rc;
15592 }
15593
15594 /**
15595 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
15596 * @pdev: pointer to PCI device
15597 *
15598 * This routine is registered to the PCI subsystem for error handling. It
15599 * is called when kernel error recovery tells the lpfc driver that it is
15600 * OK to resume normal PCI operation after PCI bus error recovery. When
15601 * this routine is invoked, it dispatches the action to the proper SLI-3
15602 * or SLI-4 device io_resume routine, which will resume the device operation.
15603 **/
15604 static void
15605 lpfc_io_resume(struct pci_dev *pdev)
15606 {
15607 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15608 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15609
15610 switch (phba->pci_dev_grp) {
15611 case LPFC_PCI_DEV_LP:
15612 lpfc_io_resume_s3(pdev);
15613 break;
15614 case LPFC_PCI_DEV_OC:
15615 lpfc_io_resume_s4(pdev);
15616 break;
15617 default:
15618 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15619 "1429 Invalid PCI device group: 0x%x\n",
15620 phba->pci_dev_grp);
15621 break;
15622 }
15623 return;
15624 }
15625
15626 /**
15627 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
15628 * @phba: pointer to lpfc hba data structure.
15629 *
15630 * This routine checks whether OAS is supported for this adapter. If
15631 * supported, the Flash Optimized Fabric (FOF) configuration flag is set.
15632 * Otherwise, the enable OAS flag is cleared and the pool created for OAS
15633 * device data is destroyed.
15634 *
15635 **/
15636 static void
15637 lpfc_sli4_oas_verify(struct lpfc_hba *phba)
15638 {
15639
15640 if (!phba->cfg_EnableXLane)
15641 return;
15642
15643 if (phba->sli4_hba.pc_sli4_params.oas_supported) {
15644 phba->cfg_fof = 1;
15645 } else {
15646 phba->cfg_fof = 0;
15647 mempool_destroy(phba->device_data_mem_pool);
15648 phba->device_data_mem_pool = NULL;
15649 }
15650
15651 return;
15652 }
15653
15654 /**
15655 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
15656 * @phba: pointer to lpfc hba data structure.
15657 *
15658 * This routine checks whether RAS firmware logging is supported by the
15659 * adapter and whether this PCI function is the one designated to enable it.
15660 **/
15661 void
15662 lpfc_sli4_ras_init(struct lpfc_hba *phba)
15663 {
15664 /* if ASIC_GEN_NUM >= 0xC */
15665 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
15666 LPFC_SLI_INTF_IF_TYPE_6) ||
15667 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
15668 LPFC_SLI_INTF_FAMILY_G6)) {
15669 phba->ras_fwlog.ras_hwsupport = true;
15670 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
15671 phba->cfg_ras_fwlog_buffsize)
15672 phba->ras_fwlog.ras_enabled = true;
15673 else
15674 phba->ras_fwlog.ras_enabled = false;
15675 } else {
15676 phba->ras_fwlog.ras_hwsupport = false;
15677 }
15678 }
15679
15680
15681 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
15682
15683 static const struct pci_error_handlers lpfc_err_handler = {
15684 .error_detected = lpfc_io_error_detected,
15685 .slot_reset = lpfc_io_slot_reset,
15686 .resume = lpfc_io_resume,
15687 };
15688
15689 static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
15690 lpfc_pci_suspend_one,
15691 lpfc_pci_resume_one);
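
/*
 * SIMPLE_DEV_PM_OPS() wires the same callback pair into all of the
 * system-sleep slots, matching the comments above that treat SUSPEND,
 * HIBERNATE and FREEZE alike. A roughly equivalent open-coded form (a
 * sketch, not the exact macro expansion):
 */
#if 0	/* illustrative sketch, compiled out */
static const struct dev_pm_ops lpfc_pci_pm_ops_open_coded = {
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.freeze		= lpfc_pci_suspend_one,
	.thaw		= lpfc_pci_resume_one,
	.poweroff	= lpfc_pci_suspend_one,
	.restore	= lpfc_pci_resume_one,
};
#endif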
15692
15693 static struct pci_driver lpfc_driver = {
15694 .name = LPFC_DRIVER_NAME,
15695 .id_table = lpfc_id_table,
15696 .probe = lpfc_pci_probe_one,
15697 .remove = lpfc_pci_remove_one,
15698 .shutdown = lpfc_pci_remove_one,
15699 .driver.pm = &lpfc_pci_pm_ops_one,
15700 .err_handler = &lpfc_err_handler,
15701 };
15702
15703 static const struct file_operations lpfc_mgmt_fop = {
15704 .owner = THIS_MODULE,
15705 };
15706
15707 static struct miscdevice lpfc_mgmt_dev = {
15708 .minor = MISC_DYNAMIC_MINOR,
15709 .name = "lpfcmgmt",
15710 .fops = &lpfc_mgmt_fop,
15711 };
15712
15713 /**
15714 * lpfc_init - lpfc module initialization routine
15715 *
15716 * This routine is to be invoked when the lpfc module is loaded into the
15717 * kernel. The special kernel macro module_init() is used to indicate the
15718 * role of this routine to the kernel as the lpfc module entry point.
15719 *
15720 * Return codes
15721 * 0 - successful
15722 * -ENOMEM - FC attach transport failed
15723 * all others - failed
15724 */
15725 static int __init
15726 lpfc_init(void)
15727 {
15728 int error = 0;
15729
15730 pr_info(LPFC_MODULE_DESC "\n");
15731 pr_info(LPFC_COPYRIGHT "\n");
15732
15733 error = misc_register(&lpfc_mgmt_dev);
15734 if (error)
15735 printk(KERN_ERR "Could not register lpfcmgmt device, "
15736 "misc_register returned with status %d\n", error);
15737
15738 error = -ENOMEM;
15739 lpfc_transport_functions.vport_create = lpfc_vport_create;
15740 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
15741 lpfc_transport_template =
15742 fc_attach_transport(&lpfc_transport_functions);
15743 if (lpfc_transport_template == NULL)
15744 goto unregister;
15745 lpfc_vport_transport_template =
15746 fc_attach_transport(&lpfc_vport_transport_functions);
15747 if (lpfc_vport_transport_template == NULL) {
15748 fc_release_transport(lpfc_transport_template);
15749 goto unregister;
15750 }
15751 lpfc_wqe_cmd_template();
15752 lpfc_nvmet_cmd_template();
15753
15754 /* Initialize in case vector mapping is needed */
15755 lpfc_present_cpu = num_present_cpus();
15756
15757 lpfc_pldv_detect = false;
15758
15759 error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
15760 "lpfc/sli4:online",
15761 lpfc_cpu_online, lpfc_cpu_offline);
15762 if (error < 0)
15763 goto cpuhp_failure;
15764 lpfc_cpuhp_state = error;
15765
15766 error = pci_register_driver(&lpfc_driver);
15767 if (error)
15768 goto unwind;
15769
15770 return error;
15771
15772 unwind:
15773 cpuhp_remove_multi_state(lpfc_cpuhp_state);
15774 cpuhp_failure:
15775 fc_release_transport(lpfc_transport_template);
15776 fc_release_transport(lpfc_vport_transport_template);
15777 unregister:
15778 misc_deregister(&lpfc_mgmt_dev);
15779
15780 return error;
15781 }
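
/*
 * For illustration: cpuhp_setup_state_multi() above only reserves a dynamic
 * hotplug state and registers the online/offline callbacks; each HBA later
 * attaches itself as an instance (see lpfc_cpuhp_add()/lpfc_cpuhp_remove()).
 * A sketch of the per-instance pattern, assuming an object embedding a
 * struct hlist_node (ex_* names are hypothetical):
 */
#if 0	/* illustrative sketch, compiled out */
static int ex_cpuhp_attach(enum cpuhp_state state, struct hlist_node *node)
{
	/* Runs the online callback for node on each online CPU, then tracks it */
	return cpuhp_state_add_instance(state, node);
}

static void ex_cpuhp_detach(enum cpuhp_state state, struct hlist_node *node)
{
	/* Untracks node without invoking the offline callback */
	cpuhp_state_remove_instance_nocalls(state, node);
}
#endif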
15782
15783 void lpfc_dmp_dbg(struct lpfc_hba *phba)
15784 {
15785 unsigned int start_idx;
15786 unsigned int dbg_cnt;
15787 unsigned int temp_idx;
15788 int i;
15789 int j = 0;
15790 unsigned long rem_nsec;
15791
15792 if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
15793 return;
15794
15795 start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
15796 dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
15797 if (!dbg_cnt)
15798 goto out;
15799 temp_idx = start_idx;
15800 if (dbg_cnt >= DBG_LOG_SZ) {
15801 dbg_cnt = DBG_LOG_SZ;
15802 temp_idx -= 1;
15803 } else {
15804 if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
15805 temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
15806 } else {
15807 if (start_idx < dbg_cnt)
15808 start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
15809 else
15810 start_idx -= dbg_cnt;
15811 }
15812 }
15813 dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
15814 start_idx, temp_idx, dbg_cnt);
15815
15816 for (i = 0; i < dbg_cnt; i++) {
15817 if ((start_idx + i) < DBG_LOG_SZ)
15818 temp_idx = (start_idx + i) % DBG_LOG_SZ;
15819 else
15820 temp_idx = j++;
15821 rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
15822 dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
15823 temp_idx,
15824 (unsigned long)phba->dbg_log[temp_idx].t_ns,
15825 rem_nsec / 1000,
15826 phba->dbg_log[temp_idx].log);
15827 }
15828 out:
15829 atomic_set(&phba->dbg_log_cnt, 0);
15830 atomic_set(&phba->dbg_log_dmping, 0);
15831 }
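
/*
 * Worked example for the index math above, assuming DBG_LOG_SZ is 256: if
 * 300 messages have been logged, dbg_log_idx % 256 = 44 marks the oldest
 * surviving entry, dbg_cnt is clamped to 256, and the dump walks entries
 * 44..255 followed by 0..43, i.e. oldest to newest. If only 10 messages
 * were logged with a wrapped start_idx of 4, start_idx is rewound to
 * 256 - (10 - 4) = 250 and the walk covers 250..255 then 0..3.
 */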
15832
15833 __printf(2, 3)
15834 void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
15835 {
15836 unsigned int idx;
15837 va_list args;
15838 int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
15839 struct va_format vaf;
15840
15841
15842 va_start(args, fmt);
15843 if (unlikely(dbg_dmping)) {
15844 vaf.fmt = fmt;
15845 vaf.va = &args;
15846 dev_info(&phba->pcidev->dev, "%pV", &vaf);
15847 va_end(args);
15848 return;
15849 }
15850 idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
15851 DBG_LOG_SZ;
15852
15853 atomic_inc(&phba->dbg_log_cnt);
15854
15855 vscnprintf(phba->dbg_log[idx].log,
15856 sizeof(phba->dbg_log[idx].log), fmt, args);
15857 va_end(args);
15858
15859 phba->dbg_log[idx].t_ns = local_clock();
15860 }
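
/*
 * Usage sketch for the ring logger above: messages are normally formatted
 * into the per-HBA dbg_log[] ring and timestamped with local_clock(); while
 * a dump is in progress they go straight to the console instead. A
 * hypothetical call site:
 */
#if 0	/* illustrative sketch, compiled out */
static void ex_log_io_timeout(struct lpfc_hba *phba, u16 xri, u32 ticks)
{
	lpfc_dbg_print(phba, "io timeout: xri x%x after %u ticks\n",
		       xri, ticks);
}
#endif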
15861
15862 /**
15863 * lpfc_exit - lpfc module removal routine
15864 *
15865 * This routine is invoked when the lpfc module is removed from the kernel.
15866 * The special kernel macro module_exit() is used to indicate the role of
15867 * this routine to the kernel as the lpfc module exit point.
15868 */
15869 static void __exit
15870 lpfc_exit(void)
15871 {
15872 misc_deregister(&lpfc_mgmt_dev);
15873 pci_unregister_driver(&lpfc_driver);
15874 cpuhp_remove_multi_state(lpfc_cpuhp_state);
15875 fc_release_transport(lpfc_transport_template);
15876 fc_release_transport(lpfc_vport_transport_template);
15877 idr_destroy(&lpfc_hba_index);
15878 }
15879
15880 module_init(lpfc_init);
15881 module_exit(lpfc_exit);
15882 MODULE_LICENSE("GPL");
15883 MODULE_DESCRIPTION(LPFC_MODULE_DESC);
15884 MODULE_AUTHOR("Broadcom");
15885 MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
15886