/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/crash_dump.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
        LPFC_UNKNOWN_IOCB,
        LPFC_UNSOL_IOCB,
        LPFC_SOL_IOCB,
        LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
                                  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
                              uint8_t *, uint32_t *);
static struct lpfc_iocbq *
lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
                                  struct lpfc_iocbq *rspiocbq);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
                                      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
                                          struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
                                    struct lpfc_queue *cq,
                                    struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
                                   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
                                     struct lpfc_queue *eq,
                                     struct lpfc_eqe *eqe,
                                     enum lpfc_poll_mode poll_mode);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
                                    struct lpfc_queue *cq,
                                    struct lpfc_cqe *cqe);
static uint16_t lpfc_wqe_bpl2sgl(struct lpfc_hba *phba,
                                 struct lpfc_iocbq *pwqeq,
                                 struct lpfc_sglq *sglq);

union lpfc_wqe128 lpfc_iread_cmd_template;
union lpfc_wqe128 lpfc_iwrite_cmd_template;
union lpfc_wqe128 lpfc_icmnd_cmd_template;

/* Setup WQE templates for IOs */
void lpfc_wqe_cmd_template(void)
{
        union lpfc_wqe128 *wqe;

        /* IREAD template */
        wqe = &lpfc_iread_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - cmd_buff_len, payload_offset_len is zero */

        /* Word 4 - total_xfer_len is variable */

        /* Word 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
        bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
        bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

        /* Word 8 - abort_tag is variable */

        /* Word 9 - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
        bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
        bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
        bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

        /* Word 11 - pbde is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
        bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);

        /* Word 12 - is zero */

        /* Word 13, 14, 15 - PBDE is variable */

        /* IWRITE template */
        wqe = &lpfc_iwrite_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - cmd_buff_len, payload_offset_len is zero */

        /* Word 4 - total_xfer_len is variable */

        /* Word 5 - initial_xfer_len is variable */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
        bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
        bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

        /* Word 8 - abort_tag is variable */

        /* Word 9 - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
        bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
        bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
        bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

        /* Word 11 - pbde is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
        bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);

        /* Word 12 - is zero */

        /* Word 13, 14, 15 - PBDE is variable */

        /* ICMND template */
        wqe = &lpfc_icmnd_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - payload_offset_len is variable */

        /* Word 4, 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
        bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
        bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

        /* Word 8 - abort_tag is variable */

        /* Word 9 - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
        bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
        bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
        bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

        /* Word 11 */
        bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
        bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

        /* Word 12, 13, 14, 15 - is zero */
}

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy. Must be a multiple of sizeof(uint64_t).
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without a lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
        uint64_t *src = srcp;
        uint64_t *dest = destp;
        int i;

        for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
                *dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
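
/*
 * NOTE: on builds where the open-coded 64-bit copy above is not compiled in,
 * the macro falls back to lpfc_sli_pcimem_bcopy(), which performs the
 * per-word endian conversion. A minimal sketch of that fallback's transform
 * (an illustration of the contract, not the fallback's actual body):
 *
 *	uint32_t *src = srcp, *dest = destp;
 *	uint32_t i;
 *
 *	for (i = 0; i < cnt; i += sizeof(uint32_t))
 *		*dest++ = le32_to_cpu(*src++);
 */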

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If the queue is full it returns -EBUSY; if @q has no backing
 * memory it returns -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
        union lpfc_wqe *temp_wqe;
        struct lpfc_register doorbell;
        uint32_t host_index;
        uint32_t idx;
        uint32_t i = 0;
        uint8_t *tmp;
        u32 if_type;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;

        temp_wqe = lpfc_sli4_qe(q, q->host_index);

        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->host_index + 1) % q->entry_count);
        if (idx == q->hba_index) {
                q->WQ_overflow++;
                return -EBUSY;
        }
        q->WQ_posted++;
        /* set consumption flag every once in a while */
        if (!((q->host_index + 1) % q->notify_interval))
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
        else
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
        if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
                bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
        lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
        if (q->dpp_enable && q->phba->cfg_enable_dpp) {
                /* write to DPP aperture taking advantage of Combined Writes */
                tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
                for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
                        __raw_writeq(*((uint64_t *)(tmp + i)),
                                     q->dpp_regaddr + i);
#else
                for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
                        __raw_writel(*((uint32_t *)(tmp + i)),
                                     q->dpp_regaddr + i);
#endif
        }
        /* ensure WQE bcopy and DPP flushed before doorbell write */
        wmb();

        /* Update the host index before invoking device */
        host_index = q->host_index;

        q->host_index = idx;

        /* Ring Doorbell */
        doorbell.word0 = 0;
        if (q->db_format == LPFC_DB_LIST_FORMAT) {
                if (q->dpp_enable && q->phba->cfg_enable_dpp) {
                        bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
                        bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
                        bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
                               q->dpp_id);
                        bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
                               q->queue_id);
                } else {
                        bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
                        bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

                        /* Leave bits <23:16> clear for if_type 6 dpp */
                        if_type = bf_get(lpfc_sli_intf_if_type,
                                         &q->phba->sli4_hba.sli_intf);
                        if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
                                bf_set(lpfc_wq_db_list_fm_index, &doorbell,
                                       host_index);
                }
        } else if (q->db_format == LPFC_DB_RING_FORMAT) {
                bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
                bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
        } else {
                return -EINVAL;
        }
        writel(doorbell.word0, q->db_regaddr);

        return 0;
}
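
/*
 * NOTE: a minimal caller sketch for lpfc_sli4_wq_put(), assuming "wq" is a
 * set-up Work Queue and "wqe" a fully built WQE (both names are illustrative,
 * not taken from a specific caller in this file):
 *
 *	lockdep_assert_held(&phba->hbalock);
 *	rc = lpfc_sli4_wq_put(wq, wqe);
 *	if (rc == -EBUSY)
 *		... queue full, retry later (e.g. leave the request queued) ...
 *	else if (rc)
 *		... -ENOMEM/-EINVAL: queue is not usable ...
 */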

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers.
 **/
static void
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
        /* sanity check on queue memory */
        if (unlikely(!q))
                return;

        q->hba_index = index;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
        struct lpfc_mqe *temp_mqe;
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;
        temp_mqe = lpfc_sli4_qe(q, q->host_index);

        /* If the host has not yet processed the next entry then we are done */
        if (((q->host_index + 1) % q->entry_count) == q->hba_index)
                return -ENOMEM;
        lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
        /* Save off the mailbox pointer for completion */
        q->phba->mbox = (MAILBOX_t *)temp_mqe;

        /* Update the host index before invoking device */
        q->host_index = ((q->host_index + 1) % q->entry_count);

        /* Ring Doorbell */
        doorbell.word0 = 0;
        bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
        bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
        return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;

        /* Clear the mailbox pointer for completion */
        q->phba->mbox = NULL;
        q->hba_index = ((q->hba_index + 1) % q->entry_count);
        return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return
 * NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
        struct lpfc_eqe *eqe;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        eqe = lpfc_sli4_qe(q, q->host_index);

        /* If the next EQE is not valid then we are done */
        if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
                return NULL;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
         * instructions allowing action on content before the valid bit is
         * checked, add a barrier here as well. It may not be needed, as the
         * "content" is a single 32-bit entity here (vs. the multi-word
         * structure of a CQE).
         */
        mb();
        return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue whose interrupts are to be disabled
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
               (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue whose interrupts are to be disabled
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                      uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm) {
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
                bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        }
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
               (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                          uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQDBregaddr);
}

static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
                        struct lpfc_eqe *eqe)
{
        if (!phba->sli4_hba.pc_sli4_params.eqav)
                bf_set_le32(lpfc_eqe_valid, eqe, 0);

        eq->host_index = ((eq->host_index + 1) % eq->entry_count);

        /* if the index wrapped around, toggle the valid bit */
        if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
                eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}

static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
        struct lpfc_eqe *eqe = NULL;
        u32 eq_count = 0, cq_count = 0;
        struct lpfc_cqe *cqe = NULL;
        struct lpfc_queue *cq = NULL, *childq = NULL;
        int cqid = 0;

        /* walk all the EQ entries and drop on the floor */
        eqe = lpfc_sli4_eq_get(eq);
        while (eqe) {
                /* Get the reference to the corresponding CQ */
                cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
                cq = NULL;

                list_for_each_entry(childq, &eq->child_list, list) {
                        if (childq->queue_id == cqid) {
                                cq = childq;
                                break;
                        }
                }
                /* If CQ is valid, iterate through it and drop all the CQEs */
                if (cq) {
                        cqe = lpfc_sli4_cq_get(cq);
                        while (cqe) {
                                __lpfc_sli4_consume_cqe(phba, cq, cqe);
                                cq_count++;
                                cqe = lpfc_sli4_cq_get(cq);
                        }
                        /* Clear and re-arm the CQ */
                        phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
                                                        LPFC_QUEUE_REARM);
                        cq_count = 0;
                }
                __lpfc_sli4_consume_eqe(phba, eq, eqe);
                eq_count++;
                eqe = lpfc_sli4_eq_get(eq);
        }

        /* Clear and re-arm the EQ */
        phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}

static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
                     u8 rearm, enum lpfc_poll_mode poll_mode)
{
        struct lpfc_eqe *eqe;
        int count = 0, consumed = 0;

        if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
                goto rearm_and_exit;

        eqe = lpfc_sli4_eq_get(eq);
        while (eqe) {
                lpfc_sli4_hba_handle_eqe(phba, eq, eqe, poll_mode);
                __lpfc_sli4_consume_eqe(phba, eq, eqe);

                consumed++;
                if (!(++count % eq->max_proc_limit))
                        break;

                if (!(count % eq->notify_interval)) {
                        phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
                                                        LPFC_QUEUE_NOARM);
                        consumed = 0;
                }

                eqe = lpfc_sli4_eq_get(eq);
        }
        eq->EQ_processed += count;

        /* Track the max number of EQEs processed in 1 intr */
        if (count > eq->EQ_max_eqe)
                eq->EQ_max_eqe = count;

        xchg(&eq->queue_claimed, 0);

rearm_and_exit:
        /* Always clear the EQ. */
        phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

        return count;
}
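
/*
 * NOTE: a hedged sketch of how an EQ interrupt handler might drive
 * lpfc_sli4_process_eq(); the handler name, the EQ lookup via dev_id and
 * the LPFC_QUEUE_WORK poll mode are illustrative assumptions, not code
 * from this driver:
 *
 *	static irqreturn_t example_eq_intr(int irq, void *dev_id)
 *	{
 *		struct lpfc_queue *eq = dev_id;
 *
 *		lpfc_sli4_process_eq(eq->phba, eq, LPFC_QUEUE_REARM,
 *				     LPFC_QUEUE_WORK);
 *		return IRQ_HANDLED;
 *	}
 */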

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q,
 * update the queue's internal hba index, and return the CQE. If no valid
 * CQEs are in the Queue (no more work to do), or the Queue is full of CQEs
 * that have been processed, but not popped back to the HBA then this
 * routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
        struct lpfc_cqe *cqe;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        cqe = lpfc_sli4_qe(q, q->host_index);

        /* If the next CQE is not valid then we are done */
        if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
                return NULL;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Speculative instructions were allowing a bcopy at the start
         * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
         * after our return, to copy data before the valid bit check above
         * was done. As such, some of the copied data was stale. The barrier
         * ensures the check happens before any data is copied.
         */
        mb();
        return cqe;
}

static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
                        struct lpfc_cqe *cqe)
{
        if (!phba->sli4_hba.pc_sli4_params.cqav)
                bf_set_le32(lpfc_cqe_valid, cqe, 0);

        cq->host_index = ((cq->host_index + 1) % cq->entry_count);

        /* if the index wrapped around, toggle the valid bit */
        if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
                cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}

/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                      uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
        bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
               (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                          uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the header queue.
 * @drqe: The Data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq, then ring the Receive Queue Doorbell to
 * signal the HBA to start processing the Receive Queue Entries. This
 * function returns the index that the rqe was copied to if successful. If
 * the queues are full it returns -EBUSY; missing or mismatched queues
 * return -ENOMEM or -EINVAL.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
                 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
        struct lpfc_rqe *temp_hrqe;
        struct lpfc_rqe *temp_drqe;
        struct lpfc_register doorbell;
        int hq_put_index;
        int dq_put_index;

        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return -ENOMEM;
        hq_put_index = hq->host_index;
        dq_put_index = dq->host_index;
        temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
        temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

        if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
                return -EINVAL;
        if (hq_put_index != dq_put_index)
                return -EINVAL;
        /* If the host has not yet processed the next entry then we are done */
        if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
                return -EBUSY;
        lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
        lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

        /* Update the host index to point to the next slot */
        hq->host_index = ((hq_put_index + 1) % hq->entry_count);
        dq->host_index = ((dq_put_index + 1) % dq->entry_count);
        hq->RQ_buf_posted++;

        /* Ring The Header Receive Queue Doorbell */
        if (!(hq->host_index % hq->notify_interval)) {
                doorbell.word0 = 0;
                if (hq->db_format == LPFC_DB_RING_FORMAT) {
                        bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
                               hq->notify_interval);
                        bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
                } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
                        bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
                               hq->notify_interval);
                        bf_set(lpfc_rq_db_list_fm_index, &doorbell,
                               hq->host_index);
                        bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
                } else {
                        return -EINVAL;
                }
                writel(doorbell.word0, hq->db_regaddr);
        }
        return hq_put_index;
}
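
/*
 * NOTE: a hedged sketch of posting one header/data buffer pair with
 * lpfc_sli4_rq_put(); "hrq", "drq", "hrqe" and "drqe" are assumed to be a
 * matched HRQ/DRQ pair and two RQEs already loaded with buffer addresses:
 *
 *	lockdep_assert_held(&phba->hbalock);
 *	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
 *	if (rc < 0)
 *		... -EBUSY (full), -EINVAL (mismatch) or -ENOMEM ...
 *	else
 *		... rc is the HRQ index the pair was copied to ...
 */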

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return 0;

        if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
                return 0;
        hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
        dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
        return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
                           pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
                           pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object; otherwise it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
        struct lpfc_iocbq *iocbq = NULL;

        lockdep_assert_held(&phba->hbalock);

        list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
        if (iocbq)
                phba->iocb_cnt++;
        if (phba->iocb_cnt > phba->iocb_max)
                phba->iocb_max = phba->iocb_cnt;
        return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
        return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
                    uint16_t xritag,
                    struct lpfc_node_rrq *rrq)
{
        struct lpfc_nodelist *ndlp = NULL;

        /* Lookup did to verify if did is still active on this vport */
        if (rrq->vport)
                ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

        if (!ndlp)
                goto out;

        if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
                rrq->send_rrq = 0;
                rrq->xritag = 0;
                rrq->rrq_stop_time = 0;
        }
out:
        mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if an RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function takes the hbalock. For each active rrq it checks whether
 * stop_time (ratov from setting the rrq active) has been reached; if it has
 * and the send_rrq flag is set then it calls lpfc_send_rrq. If the send_rrq
 * flag is not set then it just calls the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long next_time;
        unsigned long iflags;
        LIST_HEAD(send_rrq);

        spin_lock_irqsave(&phba->hbalock, iflags);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
        next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        list_for_each_entry_safe(rrq, nextrrq,
                                 &phba->active_rrq_list, list) {
                if (time_after(jiffies, rrq->rrq_stop_time))
                        list_move(&rrq->list, &send_rrq);
                else if (time_before(rrq->rrq_stop_time, next_time))
                        next_time = rrq->rrq_stop_time;
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        if ((!list_empty(&phba->active_rrq_list)) &&
            (!(phba->pport->load_flag & FC_UNLOADING)))
                mod_timer(&phba->rrq_tmr, next_time);
        list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
                list_del(&rrq->list);
                if (!rrq->send_rrq) {
                        /* this call will free the rrq */
                        lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
                } else if (lpfc_send_rrq(phba, rrq)) {
                        /* if we send the rrq then the completion handler
                         * will clear the bit in the xribitmap.
                         */
                        lpfc_clr_rrq_active(phba, rrq->xritag,
                                            rrq);
                }
        }
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * Returns: NULL if no rrq is found in the phba->active_rrq_list;
 * otherwise the rrq for this xri and target, removed from the list.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;

        if (phba->sli_rev != LPFC_SLI_REV4)
                return NULL;
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
                if (rrq->vport == vport && rrq->xritag == xri &&
                    rrq->nlp_DID == did) {
                        list_del(&rrq->list);
                        spin_unlock_irqrestore(&phba->hbalock, iflags);
                        return rrq;
                }
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If @ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear each rrq. If @ndlp is not NULL, only
 * remove the rrqs for this vport and this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;
        LIST_HEAD(rrq_list);

        if (phba->sli_rev != LPFC_SLI_REV4)
                return;
        if (!ndlp) {
                lpfc_sli4_vport_delete_els_xri_aborted(vport);
                lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
        }
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
                if (rrq->vport != vport)
                        continue;

                if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
                        list_move(&rrq->list, &rrq_list);
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);

        list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
                list_del(&rrq->list);
                lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
        }
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                     uint16_t xritag)
{
        if (!ndlp)
                return 0;
        if (!ndlp->active_rrqs_xri_bitmap)
                return 0;
        if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                return 1;
        else
                return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * Returns: 0 if the rrq was activated for this xri;
 * < 0 on no memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
        unsigned long iflags;
        struct lpfc_node_rrq *rrq;
        int empty;

        if (!ndlp)
                return -EINVAL;

        if (!phba->cfg_enable_rrq)
                return -EINVAL;

        spin_lock_irqsave(&phba->hbalock, iflags);
        if (phba->pport->load_flag & FC_UNLOADING) {
                phba->hba_flag &= ~HBA_RRQ_ACTIVE;
                goto out;
        }

        if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
                goto out;

        if (!ndlp->active_rrqs_xri_bitmap)
                goto out;

        if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                goto out;

        spin_unlock_irqrestore(&phba->hbalock, iflags);
        rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
        if (!rrq) {
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
                                " DID:0x%x Send:%d\n",
                                xritag, rxid, ndlp->nlp_DID, send_rrq);
                return -EINVAL;
        }
        if (phba->cfg_enable_rrq == 1)
                rrq->send_rrq = send_rrq;
        else
                rrq->send_rrq = 0;
        rrq->xritag = xritag;
        rrq->rrq_stop_time = jiffies +
                             msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        rrq->nlp_DID = ndlp->nlp_DID;
        rrq->vport = ndlp->vport;
        rrq->rxid = rxid;
        spin_lock_irqsave(&phba->hbalock, iflags);
        empty = list_empty(&phba->active_rrq_list);
        list_add_tail(&rrq->list, &phba->active_rrq_list);
        phba->hba_flag |= HBA_RRQ_ACTIVE;
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        if (empty)
                lpfc_worker_wake_up(phba);
        return 0;
out:
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "2921 Can't set rrq active xri:0x%x rxid:0x%x"
                        " DID:0x%x Send:%d\n",
                        xritag, rxid, ndlp->nlp_DID, send_rrq);
        return -EINVAL;
}
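
/*
 * NOTE: the RRQ helpers above form one lifecycle: lpfc_set_rrq_active()
 * quarantines an xri after an exchange is aborted, lpfc_test_rrq_active()
 * reports whether the xri is still quarantined for a given target, and
 * lpfc_clr_rrq_active() releases it once RATOV expires or the RRQ ELS
 * completes. A hedged reuse check (illustrative only):
 *
 *	if (lpfc_test_rrq_active(phba, ndlp, xritag))
 *		... skip this xri, it is still quarantined for ndlp ...
 */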

/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty the allocation succeeds and a pointer to the newly allocated sglq
 * object is returned; otherwise NULL is returned.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
        struct lpfc_sglq *sglq = NULL;
        struct lpfc_sglq *start_sglq = NULL;
        struct lpfc_io_buf *lpfc_cmd;
        struct lpfc_nodelist *ndlp;
        int found = 0;
        u8 cmnd;

        cmnd = get_job_cmnd(phba, piocbq);

        if (piocbq->cmd_flag & LPFC_IO_FCP) {
                lpfc_cmd = piocbq->io_buf;
                ndlp = lpfc_cmd->rdata->pnode;
        } else if ((cmnd == CMD_GEN_REQUEST64_CR) &&
                   !(piocbq->cmd_flag & LPFC_IO_LIBDFC)) {
                ndlp = piocbq->ndlp;
        } else if (piocbq->cmd_flag & LPFC_IO_LIBDFC) {
                if (piocbq->cmd_flag & LPFC_IO_LOOPBACK)
                        ndlp = NULL;
                else
                        ndlp = piocbq->ndlp;
        } else {
                ndlp = piocbq->ndlp;
        }

        spin_lock(&phba->sli4_hba.sgl_list_lock);
        list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
        start_sglq = sglq;
        while (!found) {
                if (!sglq)
                        break;
                if (ndlp && ndlp->active_rrqs_xri_bitmap &&
                    test_bit(sglq->sli4_lxritag,
                             ndlp->active_rrqs_xri_bitmap)) {
                        /* This xri has an rrq outstanding for this DID.
                         * put it back in the list and get another xri.
                         */
                        list_add_tail(&sglq->list, lpfc_els_sgl_list);
                        sglq = NULL;
                        list_remove_head(lpfc_els_sgl_list, sglq,
                                         struct lpfc_sglq, list);
                        if (sglq == start_sglq) {
                                list_add_tail(&sglq->list, lpfc_els_sgl_list);
                                sglq = NULL;
                                break;
                        } else
                                continue;
                }
                sglq->ndlp = ndlp;
                found = 1;
                phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
                sglq->state = SGL_ALLOCATED;
        }
        spin_unlock(&phba->sli4_hba.sgl_list_lock);
        return sglq;
}

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. It gets a new driver
 * sglq object from the sglq list. If the list is not empty the allocation
 * succeeds and a pointer to the newly allocated sglq object is returned;
 * otherwise NULL is returned.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_nvmet_sgl_list;
        struct lpfc_sglq *sglq = NULL;

        lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

        lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

        list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
        if (!sglq)
                return NULL;
        phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
        sglq->state = SGL_ALLOCATED;
        return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object; otherwise it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct lpfc_iocbq *iocbq = NULL;
        unsigned long iflags;

        spin_lock_irqsave(&phba->hbalock, iflags);
        iocbq = __lpfc_sli_get_iocbq(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return iocbq;
}
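
/*
 * NOTE: a hedged sketch pairing the lock-free wrappers; "issue failed" is an
 * illustrative condition, not a flag in this driver. Any iocbq obtained from
 * lpfc_sli_get_iocbq() that is not handed to the hardware should go back via
 * lpfc_sli_release_iocbq() so the pool's iocb_cnt accounting stays balanced:
 *
 *	piocb = lpfc_sli_get_iocbq(phba);
 *	if (!piocb)
 *		return -ENOMEM;
 *	... build and issue piocb ...
 *	if (issue failed)
 *		lpfc_sli_release_iocbq(phba, piocb);
 */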

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object
 * to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
 * asserted held in the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        struct lpfc_sglq *sglq;
        unsigned long iflag = 0;
        struct lpfc_sli_ring *pring;

        if (iocbq->sli4_xritag == NO_XRI)
                sglq = NULL;
        else
                sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

        if (sglq) {
                if (iocbq->cmd_flag & LPFC_IO_NVMET) {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add_tail(&sglq->list,
                                      &phba->sli4_hba.lpfc_nvmet_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                        goto out;
                }

                if ((iocbq->cmd_flag & LPFC_EXCHANGE_BUSY) &&
                    (!(unlikely(pci_channel_offline(phba->pcidev)))) &&
                    sglq->state != SGL_XRI_ABORTED) {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);

                        /* Check if we can get a reference on ndlp */
                        if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
                                sglq->ndlp = NULL;

                        list_add(&sglq->list,
                                 &phba->sli4_hba.lpfc_abts_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                } else {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add_tail(&sglq->list,
                                      &phba->sli4_hba.lpfc_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                        pring = lpfc_phba_elsring(phba);
                        /* Check if TXQ queue needs to be serviced */
                        if (pring && (!list_empty(&pring->txq)))
                                lpfc_worker_wake_up(phba);
                }
        }

out:
        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset_startat(iocbq, 0, wqe);
        iocbq->sli4_lxritag = NO_XRI;
        iocbq->sli4_xritag = NO_XRI;
        iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
                             LPFC_IO_NVME_LS);
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object to the
 * iocb pool. The iotag in the iocb object does not change for each
 * use of the iocb object. This function clears all other fields of
 * the iocb object when it is freed. The hbalock is asserted held in
 * the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset_startat(iocbq, 0, iocb);
        iocbq->sli4_xritag = NO_XRI;
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        lockdep_assert_held(&phba->hbalock);

        phba->__lpfc_sli_release_iocbq(phba, iocbq);
        phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        unsigned long iflags;

        /*
         * __lpfc_sli_release_iocbq() clears all volatile data fields,
         * preserving the iotag and node struct; it must run under hbalock.
         */
        spin_lock_irqsave(&phba->hbalock, iflags);
        __lpfc_sli_release_iocbq(phba, iocbq);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
                      uint32_t ulpstatus, uint32_t ulpWord4)
{
        struct lpfc_iocbq *piocb;

        while (!list_empty(iocblist)) {
                list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
                if (piocb->cmd_cmpl) {
                        if (piocb->cmd_flag & LPFC_IO_NVME) {
                                lpfc_nvme_cancel_iocb(phba, piocb,
                                                      ulpstatus, ulpWord4);
                        } else {
                                if (phba->sli_rev == LPFC_SLI_REV4) {
                                        bf_set(lpfc_wcqe_c_status,
                                               &piocb->wcqe_cmpl, ulpstatus);
                                        piocb->wcqe_cmpl.parameter = ulpWord4;
                                } else {
                                        piocb->iocb.ulpStatus = ulpstatus;
                                        piocb->iocb.un.ulpWord[4] = ulpWord4;
                                }
                                (piocb->cmd_cmpl)(phba, piocb, piocb);
                        }
                } else {
                        lpfc_sli_release_iocbq(phba, piocb);
                }
        }
}
1545
1546 /**
1547 * lpfc_sli_iocb_cmd_type - Get the iocb type
1548 * @iocb_cmnd: iocb command code.
1549 *
1550 * This function is called by ring event handler function to get the iocb type.
1551 * This function translates the iocb command to an iocb command type used to
1552 * decide the final disposition of each completed IOCB.
1553 * The function returns
1554 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1555 * LPFC_SOL_IOCB if it is a solicited iocb completion
1556 * LPFC_ABORT_IOCB if it is an abort iocb
1557 * LPFC_UNSOL_IOCB if it is an unsolicited iocb
1558 *
1559 * The caller is not required to hold any lock.
1560 **/
1561 static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)1562 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1563 {
1564 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1565
1566 if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1567 return 0;
1568
1569 switch (iocb_cmnd) {
1570 case CMD_XMIT_SEQUENCE_CR:
1571 case CMD_XMIT_SEQUENCE_CX:
1572 case CMD_XMIT_BCAST_CN:
1573 case CMD_XMIT_BCAST_CX:
1574 case CMD_ELS_REQUEST_CR:
1575 case CMD_ELS_REQUEST_CX:
1576 case CMD_CREATE_XRI_CR:
1577 case CMD_CREATE_XRI_CX:
1578 case CMD_GET_RPI_CN:
1579 case CMD_XMIT_ELS_RSP_CX:
1580 case CMD_GET_RPI_CR:
1581 case CMD_FCP_IWRITE_CR:
1582 case CMD_FCP_IWRITE_CX:
1583 case CMD_FCP_IREAD_CR:
1584 case CMD_FCP_IREAD_CX:
1585 case CMD_FCP_ICMND_CR:
1586 case CMD_FCP_ICMND_CX:
1587 case CMD_FCP_TSEND_CX:
1588 case CMD_FCP_TRSP_CX:
1589 case CMD_FCP_TRECEIVE_CX:
1590 case CMD_FCP_AUTO_TRSP_CX:
1591 case CMD_ADAPTER_MSG:
1592 case CMD_ADAPTER_DUMP:
1593 case CMD_XMIT_SEQUENCE64_CR:
1594 case CMD_XMIT_SEQUENCE64_CX:
1595 case CMD_XMIT_BCAST64_CN:
1596 case CMD_XMIT_BCAST64_CX:
1597 case CMD_ELS_REQUEST64_CR:
1598 case CMD_ELS_REQUEST64_CX:
1599 case CMD_FCP_IWRITE64_CR:
1600 case CMD_FCP_IWRITE64_CX:
1601 case CMD_FCP_IREAD64_CR:
1602 case CMD_FCP_IREAD64_CX:
1603 case CMD_FCP_ICMND64_CR:
1604 case CMD_FCP_ICMND64_CX:
1605 case CMD_FCP_TSEND64_CX:
1606 case CMD_FCP_TRSP64_CX:
1607 case CMD_FCP_TRECEIVE64_CX:
1608 case CMD_GEN_REQUEST64_CR:
1609 case CMD_GEN_REQUEST64_CX:
1610 case CMD_XMIT_ELS_RSP64_CX:
1611 case DSSCMD_IWRITE64_CR:
1612 case DSSCMD_IWRITE64_CX:
1613 case DSSCMD_IREAD64_CR:
1614 case DSSCMD_IREAD64_CX:
1615 case CMD_SEND_FRAME:
1616 type = LPFC_SOL_IOCB;
1617 break;
1618 case CMD_ABORT_XRI_CN:
1619 case CMD_ABORT_XRI_CX:
1620 case CMD_CLOSE_XRI_CN:
1621 case CMD_CLOSE_XRI_CX:
1622 case CMD_XRI_ABORTED_CX:
1623 case CMD_ABORT_MXRI64_CN:
1624 case CMD_XMIT_BLS_RSP64_CX:
1625 type = LPFC_ABORT_IOCB;
1626 break;
1627 case CMD_RCV_SEQUENCE_CX:
1628 case CMD_RCV_ELS_REQ_CX:
1629 case CMD_RCV_SEQUENCE64_CX:
1630 case CMD_RCV_ELS_REQ64_CX:
1631 case CMD_ASYNC_STATUS:
1632 case CMD_IOCB_RCV_SEQ64_CX:
1633 case CMD_IOCB_RCV_ELS64_CX:
1634 case CMD_IOCB_RCV_CONT64_CX:
1635 case CMD_IOCB_RET_XRI64_CX:
1636 type = LPFC_UNSOL_IOCB;
1637 break;
1638 case CMD_IOCB_XMIT_MSEQ64_CR:
1639 case CMD_IOCB_XMIT_MSEQ64_CX:
1640 case CMD_IOCB_RCV_SEQ_LIST64_CX:
1641 case CMD_IOCB_RCV_ELS_LIST64_CX:
1642 case CMD_IOCB_CLOSE_EXTENDED_CN:
1643 case CMD_IOCB_ABORT_EXTENDED_CN:
1644 case CMD_IOCB_RET_HBQE64_CN:
1645 case CMD_IOCB_FCP_IBIDIR64_CR:
1646 case CMD_IOCB_FCP_IBIDIR64_CX:
1647 case CMD_IOCB_FCP_ITASKMGT64_CX:
1648 case CMD_IOCB_LOGENTRY_CN:
1649 case CMD_IOCB_LOGENTRY_ASYNC_CN:
1650 printk("%s - Unhandled SLI-3 Command x%x\n",
1651 __func__, iocb_cmnd);
1652 type = LPFC_UNKNOWN_IOCB;
1653 break;
1654 default:
1655 type = LPFC_UNKNOWN_IOCB;
1656 break;
1657 }
1658
1659 return type;
1660 }
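/*
 * Illustrative sketch (not part of this file's code): a ring event
 * handler would classify each response entry before dispatching it.
 * The masking with CMD_IOCB_MASK mirrors the fast-path handler; the
 * dispatch comments are assumptions for the example.
 *
 *	lpfc_iocb_type type;
 *
 *	type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
 *	switch (type) {
 *	case LPFC_SOL_IOCB:	// match against txcmplq and complete
 *	case LPFC_UNSOL_IOCB:	// hand the sequence to the ULP
 *	case LPFC_ABORT_IOCB:	// finish cleanup of the aborted exchange
 *	case LPFC_UNKNOWN_IOCB:	// log and drop
 *		break;
 *	}
 */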
1661
1662 /**
1663 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
1664 * @phba: Pointer to HBA context object.
1665 *
1666 * This function is called from SLI initialization code
1667 * to configure every ring of the HBA's SLI interface. The
1668 * caller is not required to hold any lock. This function issues
1669 * a config_ring mailbox command for each ring.
1670 * This function returns zero if successful else returns a negative
1671 * error code.
1672 **/
1673 static int
1674 lpfc_sli_ring_map(struct lpfc_hba *phba)
1675 {
1676 struct lpfc_sli *psli = &phba->sli;
1677 LPFC_MBOXQ_t *pmb;
1678 MAILBOX_t *pmbox;
1679 int i, rc, ret = 0;
1680
1681 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1682 if (!pmb)
1683 return -ENOMEM;
1684 pmbox = &pmb->u.mb;
1685 phba->link_state = LPFC_INIT_MBX_CMDS;
1686 for (i = 0; i < psli->num_rings; i++) {
1687 lpfc_config_ring(phba, i, pmb);
1688 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1689 if (rc != MBX_SUCCESS) {
1690 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1691 "0446 Adapter failed to init (%d), "
1692 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1693 "ring %d\n",
1694 rc, pmbox->mbxCommand,
1695 pmbox->mbxStatus, i);
1696 phba->link_state = LPFC_HBA_ERROR;
1697 ret = -ENXIO;
1698 break;
1699 }
1700 }
1701 mempool_free(pmb, phba->mbox_mem_pool);
1702 return ret;
1703 }
1704
1705 /**
1706 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
1707 * @phba: Pointer to HBA context object.
1708 * @pring: Pointer to driver SLI ring object.
1709 * @piocb: Pointer to the driver iocb object.
1710 *
1711 * The driver calls this function with the hbalock held for SLI3 ports or
1712 * the ring lock held for SLI4 ports. The function adds the
1713 * new iocb to txcmplq of the given ring. This function always returns
1714 * 0. If this function is called for ELS ring, this function checks if
1715 * there is a vport associated with the ELS command. This function also
1716 * starts els_tmofunc timer if this is an ELS command.
1717 **/
1718 static int
1719 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1720 struct lpfc_iocbq *piocb)
1721 {
1722 u32 ulp_command = 0;
1723
1724 BUG_ON(!piocb);
1725 ulp_command = get_job_cmnd(phba, piocb);
1726
1727 list_add_tail(&piocb->list, &pring->txcmplq);
1728 piocb->cmd_flag |= LPFC_IO_ON_TXCMPLQ;
1729 pring->txcmplq_cnt++;
1730 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1731 (ulp_command != CMD_ABORT_XRI_WQE) &&
1732 (ulp_command != CMD_ABORT_XRI_CN) &&
1733 (ulp_command != CMD_CLOSE_XRI_CN)) {
1734 BUG_ON(!piocb->vport);
1735 if (!(piocb->vport->load_flag & FC_UNLOADING))
1736 mod_timer(&piocb->vport->els_tmofunc,
1737 jiffies +
1738 msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1739 }
1740
1741 return 0;
1742 }
1743
1744 /**
1745 * lpfc_sli_ringtx_get - Get first element of the txq
1746 * @phba: Pointer to HBA context object.
1747 * @pring: Pointer to driver SLI ring object.
1748 *
1749 * This function is called with hbalock held to get next
1750 * iocb in txq of the given ring. If there is any iocb in
1751 * the txq, the function returns first iocb in the list after
1752 * removing the iocb from the list, else it returns NULL.
1753 **/
1754 struct lpfc_iocbq *
1755 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1756 {
1757 struct lpfc_iocbq *cmd_iocb;
1758
1759 lockdep_assert_held(&phba->hbalock);
1760
1761 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1762 return cmd_iocb;
1763 }
1764
1765 /**
1766 * lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl
1767 * @phba: Pointer to HBA context object.
1768 * @cmdiocb: Pointer to driver command iocb object.
1769 * @rspiocb: Pointer to driver response iocb object.
1770 *
1771 * This routine will inform the driver of any BW adjustments we need
1772 * to make. These changes will be picked up during the next CMF
1773 * timer interrupt. In addition, any BW changes will be logged
1774 * with LOG_CGN_MGMT.
1775 **/
1776 static void
1777 lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1778 struct lpfc_iocbq *rspiocb)
1779 {
1780 union lpfc_wqe128 *wqe;
1781 uint32_t status, info;
1782 struct lpfc_wcqe_complete *wcqe = &rspiocb->wcqe_cmpl;
1783 uint64_t bw, bwdif, slop;
1784 uint64_t pcent, bwpcent;
1785 int asig, afpin, sigcnt, fpincnt;
1786 int wsigmax, wfpinmax, cg, tdp;
1787 char *s;
1788
1789 /* First check for error */
1790 status = bf_get(lpfc_wcqe_c_status, wcqe);
1791 if (status) {
1792 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1793 "6211 CMF_SYNC_WQE Error "
1794 "req_tag x%x status x%x hwstatus x%x "
1795 "tdatap x%x parm x%x\n",
1796 bf_get(lpfc_wcqe_c_request_tag, wcqe),
1797 bf_get(lpfc_wcqe_c_status, wcqe),
1798 bf_get(lpfc_wcqe_c_hw_status, wcqe),
1799 wcqe->total_data_placed,
1800 wcqe->parameter);
1801 goto out;
1802 }
1803
1804 /* Gather congestion information on a successful cmpl */
1805 info = wcqe->parameter;
1806 phba->cmf_active_info = info;
1807
1808 /* See if firmware info count is valid or has changed */
1809 if (info > LPFC_MAX_CMF_INFO || phba->cmf_info_per_interval == info)
1810 info = 0;
1811 else
1812 phba->cmf_info_per_interval = info;
1813
1814 tdp = bf_get(lpfc_wcqe_c_cmf_bw, wcqe);
1815 cg = bf_get(lpfc_wcqe_c_cmf_cg, wcqe);
1816
1817 /* Get BW requirement from firmware */
1818 bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE;
1819 if (!bw) {
1820 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1821 "6212 CMF_SYNC_WQE x%x: NULL bw\n",
1822 bf_get(lpfc_wcqe_c_request_tag, wcqe));
1823 goto out;
1824 }
1825
1826 /* Gather information needed for logging if a BW change is required */
1827 wqe = &cmdiocb->wqe;
1828 asig = bf_get(cmf_sync_asig, &wqe->cmf_sync);
1829 afpin = bf_get(cmf_sync_afpin, &wqe->cmf_sync);
1830 fpincnt = bf_get(cmf_sync_wfpincnt, &wqe->cmf_sync);
1831 sigcnt = bf_get(cmf_sync_wsigcnt, &wqe->cmf_sync);
1832 if (phba->cmf_max_bytes_per_interval != bw ||
1833 (asig || afpin || sigcnt || fpincnt)) {
1834 /* Are we increasing or decreasing BW */
1835 if (phba->cmf_max_bytes_per_interval < bw) {
1836 bwdif = bw - phba->cmf_max_bytes_per_interval;
1837 s = "Increase";
1838 } else {
1839 bwdif = phba->cmf_max_bytes_per_interval - bw;
1840 s = "Decrease";
1841 }
1842
1843 /* What is the change percentage */
1844 slop = div_u64(phba->cmf_link_byte_count, 200); /*For rounding*/
1845 pcent = div64_u64(bwdif * 100 + slop,
1846 phba->cmf_link_byte_count);
1847 bwpcent = div64_u64(bw * 100 + slop,
1848 phba->cmf_link_byte_count);
1849 /* Because of the bytes adjustment for the shorter timer in
1850 * lpfc_cmf_timer(), the cmf_link_byte_count can be smaller and
1851 * BW may appear to be above 100%.
1852 */
1853 if (bwpcent > 100)
1854 bwpcent = 100;
1855
1856 if (phba->cmf_max_bytes_per_interval < bw &&
1857 bwpcent > 95)
1858 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1859 "6208 Congestion bandwidth "
1860 "limits removed\n");
1861 else if ((phba->cmf_max_bytes_per_interval > bw) &&
1862 ((bwpcent + pcent) <= 100) && ((bwpcent + pcent) > 95))
1863 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1864 "6209 Congestion bandwidth "
1865 "limits in effect\n");
1866
1867 if (asig) {
1868 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1869 "6237 BW Threshold %lld%% (%lld): "
1870 "%lld%% %s: Signal Alarm: cg:%d "
1871 "Info:%u\n",
1872 bwpcent, bw, pcent, s, cg,
1873 phba->cmf_active_info);
1874 } else if (afpin) {
1875 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1876 "6238 BW Threshold %lld%% (%lld): "
1877 "%lld%% %s: FPIN Alarm: cg:%d "
1878 "Info:%u\n",
1879 bwpcent, bw, pcent, s, cg,
1880 phba->cmf_active_info);
1881 } else if (sigcnt) {
1882 wsigmax = bf_get(cmf_sync_wsigmax, &wqe->cmf_sync);
1883 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1884 "6239 BW Threshold %lld%% (%lld): "
1885 "%lld%% %s: Signal Warning: "
1886 "Cnt %d Max %d: cg:%d Info:%u\n",
1887 bwpcent, bw, pcent, s, sigcnt,
1888 wsigmax, cg, phba->cmf_active_info);
1889 } else if (fpincnt) {
1890 wfpinmax = bf_get(cmf_sync_wfpinmax, &wqe->cmf_sync);
1891 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1892 "6240 BW Threshold %lld%% (%lld): "
1893 "%lld%% %s: FPIN Warning: "
1894 "Cnt %d Max %d: cg:%d Info:%u\n",
1895 bwpcent, bw, pcent, s, fpincnt,
1896 wfpinmax, cg, phba->cmf_active_info);
1897 } else {
1898 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1899 "6241 BW Threshold %lld%% (%lld): "
1900 "CMF %lld%% %s: cg:%d Info:%u\n",
1901 bwpcent, bw, pcent, s, cg,
1902 phba->cmf_active_info);
1903 }
1904 } else if (info) {
1905 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1906 "6246 Info Threshold %u\n", info);
1907 }
1908
1909 /* Save BW change to be picked up during next timer interrupt */
1910 phba->cmf_last_sync_bw = bw;
1911 out:
1912 lpfc_sli_release_iocbq(phba, cmdiocb);
1913 }
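/*
 * Worked example of the slop math above (illustrative numbers only).
 * slop is 1/200th of the link byte count, i.e. 0.005 in percent units,
 * so the integer divide does not floor a value sitting just under a
 * whole-percent boundary:
 *
 *	cmf_link_byte_count = 1,000,000 bytes  ->  slop = 5,000
 *	bwdif = 949,960 bytes (94.996% of the link byte count)
 *	without slop: (949,960 * 100)         / 1,000,000 = 94
 *	with slop:    (949,960 * 100 + 5,000) / 1,000,000 = 95
 */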
1914
1915 /**
1916 * lpfc_issue_cmf_sync_wqe - Issue a CMF_SYNC_WQE
1917 * @phba: Pointer to HBA context object.
1918 * @ms: ms to set in WQE interval, 0 means use init op
1919 * @total: Total rcv bytes for this interval
1920 *
1921 * This routine is called every CMF timer interrupt. Its purpose is
1922 * to issue a CMF_SYNC_WQE to the firmware to inform it of any events
1923 * that may indicate we have congestion (FPINs or Signals). Upon
1924 * completion, the firmware will indicate any BW restrictions the
1925 * driver may need to take.
1926 **/
1927 int
1928 lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
1929 {
1930 union lpfc_wqe128 *wqe;
1931 struct lpfc_iocbq *sync_buf;
1932 unsigned long iflags;
1933 u32 ret_val;
1934 u32 atot, wtot, max;
1935 u8 warn_sync_period = 0;
1936
1937 /* First address any alarm / warning activity */
1938 atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
1939 wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0);
1940
1941 /* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */
1942 if (phba->cmf_active_mode != LPFC_CFG_MANAGED ||
1943 phba->link_state == LPFC_LINK_DOWN)
1944 return 0;
1945
1946 spin_lock_irqsave(&phba->hbalock, iflags);
1947 sync_buf = __lpfc_sli_get_iocbq(phba);
1948 if (!sync_buf) {
1949 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
1950 "6244 No available WQEs for CMF_SYNC_WQE\n");
1951 ret_val = ENOMEM;
1952 goto out_unlock;
1953 }
1954
1955 wqe = &sync_buf->wqe;
1956
1957 /* WQEs are reused. Clear stale data and set key fields to zero */
1958 memset(wqe, 0, sizeof(*wqe));
1959
1960 /* If this is the very first CMF_SYNC_WQE, issue an init operation */
1961 if (!ms) {
1962 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1963 "6441 CMF Init %d - CMF_SYNC_WQE\n",
1964 phba->fc_eventTag);
1965 bf_set(cmf_sync_op, &wqe->cmf_sync, 1); /* 1=init */
1966 bf_set(cmf_sync_interval, &wqe->cmf_sync, LPFC_CMF_INTERVAL);
1967 goto initpath;
1968 }
1969
1970 bf_set(cmf_sync_op, &wqe->cmf_sync, 0); /* 0=recalc */
1971 bf_set(cmf_sync_interval, &wqe->cmf_sync, ms);
1972
1973 /* Check for alarms / warnings */
1974 if (atot) {
1975 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
1976 /* We hit a Signal alarm condition */
1977 bf_set(cmf_sync_asig, &wqe->cmf_sync, 1);
1978 } else {
1979 /* We hit a FPIN alarm condition */
1980 bf_set(cmf_sync_afpin, &wqe->cmf_sync, 1);
1981 }
1982 } else if (wtot) {
1983 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
1984 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
1985 /* We hit a Signal warning condition */
1986 max = LPFC_SEC_TO_MSEC / lpfc_fabric_cgn_frequency *
1987 lpfc_acqe_cgn_frequency;
1988 bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max);
1989 bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot);
1990 warn_sync_period = lpfc_acqe_cgn_frequency;
1991 } else {
1992 /* We hit a FPIN warning condition */
1993 bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1);
1994 bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1);
1995 if (phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ)
1996 warn_sync_period =
1997 LPFC_MSECS_TO_SECS(phba->cgn_fpin_frequency);
1998 }
1999 }
2000
2001 /* Update total read blocks during previous timer interval */
2002 wqe->cmf_sync.read_bytes = (u32)(total / LPFC_CMF_BLK_SIZE);
2003
2004 initpath:
2005 bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER);
2006 wqe->cmf_sync.event_tag = phba->fc_eventTag;
2007 bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE);
2008
2009 /* Setup reqtag to match the wqe completion. */
2010 bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag);
2011
2012 bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1);
2013 bf_set(cmf_sync_period, &wqe->cmf_sync, warn_sync_period);
2014
2015 bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND);
2016 bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1);
2017 bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT);
2018
2019 sync_buf->vport = phba->pport;
2020 sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl;
2021 sync_buf->cmd_dmabuf = NULL;
2022 sync_buf->rsp_dmabuf = NULL;
2023 sync_buf->bpl_dmabuf = NULL;
2024 sync_buf->sli4_xritag = NO_XRI;
2025
2026 sync_buf->cmd_flag |= LPFC_IO_CMF;
2027 ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
2028 if (ret_val) {
2029 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
2030 "6214 Cannot issue CMF_SYNC_WQE: x%x\n",
2031 ret_val);
2032 __lpfc_sli_release_iocbq(phba, sync_buf);
2033 }
2034 out_unlock:
2035 spin_unlock_irqrestore(&phba->hbalock, iflags);
2036 return ret_val;
2037 }
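/*
 * Illustrative sketch (not part of this file's code): how the CMF timer
 * path is expected to drive this routine. total_rcv_bytes is a name
 * assumed for the example.
 *
 *	// First interval after enabling CMF: ms == 0 selects the init op.
 *	lpfc_issue_cmf_sync_wqe(phba, 0, 0);
 *
 *	// Subsequent intervals: pass the elapsed ms and the receive byte
 *	// count observed during that interval.
 *	lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total_rcv_bytes);
 */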
2038
2039 /**
2040 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
2041 * @phba: Pointer to HBA context object.
2042 * @pring: Pointer to driver SLI ring object.
2043 *
2044 * This function is called with hbalock held and the caller must post the
2045 * iocb without releasing the lock. If the caller releases the lock,
2046 * iocb slot returned by the function is not guaranteed to be available.
2047 * The function returns pointer to the next available iocb slot if there
2048 * is available slot in the ring, else it returns NULL.
2049 * If the get index of the ring is ahead of the put index, the function
2050 * will post an error attention event to the worker thread to take the
2051 * HBA to offline state.
2052 **/
2053 static IOCB_t *
2054 lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2055 {
2056 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2057 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;
2058
2059 lockdep_assert_held(&phba->hbalock);
2060
2061 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
2062 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
2063 pring->sli.sli3.next_cmdidx = 0;
2064
2065 if (unlikely(pring->sli.sli3.local_getidx ==
2066 pring->sli.sli3.next_cmdidx)) {
2067
2068 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
2069
2070 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
2071 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2072 "0315 Ring %d issue: portCmdGet %d "
2073 "is bigger than cmd ring %d\n",
2074 pring->ringno,
2075 pring->sli.sli3.local_getidx,
2076 max_cmd_idx);
2077
2078 phba->link_state = LPFC_HBA_ERROR;
2079 /*
2080 * All error attention handlers are posted to
2081 * worker thread
2082 */
2083 phba->work_ha |= HA_ERATT;
2084 phba->work_hs = HS_FFER3;
2085
2086 lpfc_worker_wake_up(phba);
2087
2088 return NULL;
2089 }
2090
2091 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
2092 return NULL;
2093 }
2094
2095 return lpfc_cmd_iocb(phba, pring);
2096 }
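/*
 * Worked example of the command-ring index arithmetic above
 * (illustrative numbers only). With numCiocb == 4:
 *
 *	cmdidx == 3 and next_cmdidx == 3  ->  next_cmdidx wraps to 0
 *	if local_getidx (the cached copy of pgp->cmdGetInx) is also 0,
 *	the ring looks full; the index is re-read from the port, and
 *	NULL is returned only if it still matches next_cmdidx.
 */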
2097
2098 /**
2099 * lpfc_sli_next_iotag - Get an iotag for the iocb
2100 * @phba: Pointer to HBA context object.
2101 * @iocbq: Pointer to driver iocb object.
2102 *
2103 * This function gets an iotag for the iocb. If there is no unused iotag and
2104 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
2105 * array and assigns a new iotag.
2106 * The function returns the allocated iotag if successful, else returns zero.
2107 * Zero is not a valid iotag.
2108 * The caller is not required to hold any lock.
2109 **/
2110 uint16_t
2111 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
2112 {
2113 struct lpfc_iocbq **new_arr;
2114 struct lpfc_iocbq **old_arr;
2115 size_t new_len;
2116 struct lpfc_sli *psli = &phba->sli;
2117 uint16_t iotag;
2118
2119 spin_lock_irq(&phba->hbalock);
2120 iotag = psli->last_iotag;
2121 if (++iotag < psli->iocbq_lookup_len) {
2122 psli->last_iotag = iotag;
2123 psli->iocbq_lookup[iotag] = iocbq;
2124 spin_unlock_irq(&phba->hbalock);
2125 iocbq->iotag = iotag;
2126 return iotag;
2127 } else if (psli->iocbq_lookup_len < (0xffff
2128 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
2129 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
2130 spin_unlock_irq(&phba->hbalock);
2131 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
2132 GFP_KERNEL);
2133 if (new_arr) {
2134 spin_lock_irq(&phba->hbalock);
2135 old_arr = psli->iocbq_lookup;
2136 if (new_len <= psli->iocbq_lookup_len) {
2137 /* highly improbable case */
2138 kfree(new_arr);
2139 iotag = psli->last_iotag;
2140 if (++iotag < psli->iocbq_lookup_len) {
2141 psli->last_iotag = iotag;
2142 psli->iocbq_lookup[iotag] = iocbq;
2143 spin_unlock_irq(&phba->hbalock);
2144 iocbq->iotag = iotag;
2145 return iotag;
2146 }
2147 spin_unlock_irq(&phba->hbalock);
2148 return 0;
2149 }
2150 if (psli->iocbq_lookup)
2151 memcpy(new_arr, old_arr,
2152 ((psli->last_iotag + 1) *
2153 sizeof (struct lpfc_iocbq *)));
2154 psli->iocbq_lookup = new_arr;
2155 psli->iocbq_lookup_len = new_len;
2156 psli->last_iotag = iotag;
2157 psli->iocbq_lookup[iotag] = iocbq;
2158 spin_unlock_irq(&phba->hbalock);
2159 iocbq->iotag = iotag;
2160 kfree(old_arr);
2161 return iotag;
2162 }
2163 } else
2164 spin_unlock_irq(&phba->hbalock);
2165
2166 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2167 "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
2168 psli->last_iotag);
2169
2170 return 0;
2171 }
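/*
 * Illustrative sketch (not part of this file's code): callers must
 * treat a zero return as failure, since zero is never a valid iotag.
 * The error policy shown is an assumption for the example.
 *
 *	iotag = lpfc_sli_next_iotag(phba, iocbq);
 *	if (!iotag) {
 *		// no free slot and the lookup array could not grow
 *		lpfc_sli_release_iocbq(phba, iocbq);
 *		return -EIO;
 *	}
 */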
2172
2173 /**
2174 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
2175 * @phba: Pointer to HBA context object.
2176 * @pring: Pointer to driver SLI ring object.
2177 * @iocb: Pointer to iocb slot in the ring.
2178 * @nextiocb: Pointer to driver iocb object which needs to be
2179 * posted to firmware.
2180 *
2181 * This function is called to post a new iocb to the firmware. This
2182 * function copies the new iocb to ring iocb slot and updates the
2183 * ring pointers. It adds the new iocb to the txcmplq if there is
2184 * a completion callback for this iocb, else the function will free the
2185 * iocb object. The hbalock is asserted held in the code path calling
2186 * this routine.
2187 **/
2188 static void
2189 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2190 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
2191 {
2192 /*
2193 * Set up an iotag
2194 */
2195 nextiocb->iocb.ulpIoTag = (nextiocb->cmd_cmpl) ? nextiocb->iotag : 0;
2196
2197
2198 if (pring->ringno == LPFC_ELS_RING) {
2199 lpfc_debugfs_slow_ring_trc(phba,
2200 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
2201 *(((uint32_t *) &nextiocb->iocb) + 4),
2202 *(((uint32_t *) &nextiocb->iocb) + 6),
2203 *(((uint32_t *) &nextiocb->iocb) + 7));
2204 }
2205
2206 /*
2207 * Issue iocb command to adapter
2208 */
2209 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
2210 wmb();
2211 pring->stats.iocb_cmd++;
2212
2213 /*
2214 * If there is no completion routine to call, we can release the
2215 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
2216 * that have no rsp ring completion, cmd_cmpl MUST be NULL.
2217 */
2218 if (nextiocb->cmd_cmpl)
2219 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
2220 else
2221 __lpfc_sli_release_iocbq(phba, nextiocb);
2222
2223 /*
2224 * Let the HBA know what IOCB slot will be the next one the
2225 * driver will put a command into.
2226 */
2227 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
2228 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
2229 }
2230
2231 /**
2232 * lpfc_sli_update_full_ring - Update the chip attention register
2233 * @phba: Pointer to HBA context object.
2234 * @pring: Pointer to driver SLI ring object.
2235 *
2236 * The caller is not required to hold any lock for calling this function.
2237 * This function updates the chip attention bits for the ring to inform firmware
2238 * that there is pending work to be done for this ring and requests an
2239 * interrupt when there is space available in the ring. This function is
2240 * called when the driver is unable to post more iocbs to the ring due
2241 * to unavailability of space in the ring.
2242 **/
2243 static void
2244 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2245 {
2246 int ringno = pring->ringno;
2247
2248 pring->flag |= LPFC_CALL_RING_AVAILABLE;
2249
2250 wmb();
2251
2252 /*
2253 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
2254 * The HBA will tell us when an IOCB entry is available.
2255 */
2256 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
2257 readl(phba->CAregaddr); /* flush */
2258
2259 pring->stats.iocb_cmd_full++;
2260 }
2261
2262 /**
2263 * lpfc_sli_update_ring - Update chip attention register
2264 * @phba: Pointer to HBA context object.
2265 * @pring: Pointer to driver SLI ring object.
2266 *
2267 * This function updates the chip attention register bit for the
2268 * given ring to inform HBA that there is more work to be done
2269 * in this ring. The caller is not required to hold any lock.
2270 **/
2271 static void
2272 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2273 {
2274 int ringno = pring->ringno;
2275
2276 /*
2277 * Tell the HBA that there is work to do in this ring.
2278 */
2279 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
2280 wmb();
2281 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
2282 readl(phba->CAregaddr); /* flush */
2283 }
2284 }
2285
2286 /**
2287 * lpfc_sli_resume_iocb - Process iocbs in the txq
2288 * @phba: Pointer to HBA context object.
2289 * @pring: Pointer to driver SLI ring object.
2290 *
2291 * This function is called with hbalock held to post pending iocbs
2292 * in the txq to the firmware. This function is called when the driver
2293 * detects space available in the ring.
2294 **/
2295 static void
2296 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2297 {
2298 IOCB_t *iocb;
2299 struct lpfc_iocbq *nextiocb;
2300
2301 lockdep_assert_held(&phba->hbalock);
2302
2303 /*
2304 * Check to see if:
2305 * (a) there is anything on the txq to send
2306 * (b) link is up
2307 * (c) link attention events can be processed (fcp ring only)
2308 * (d) IOCB processing is not blocked by the outstanding mbox command.
2309 */
2310
2311 if (lpfc_is_link_up(phba) &&
2312 (!list_empty(&pring->txq)) &&
2313 (pring->ringno != LPFC_FCP_RING ||
2314 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
2315
2316 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2317 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
2318 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2319
2320 if (iocb)
2321 lpfc_sli_update_ring(phba, pring);
2322 else
2323 lpfc_sli_update_full_ring(phba, pring);
2324 }
2325
2326 return;
2327 }
2328
2329 /**
2330 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
2331 * @phba: Pointer to HBA context object.
2332 * @hbqno: HBQ number.
2333 *
2334 * This function is called with hbalock held to get the next
2335 * available slot for the given HBQ. If there is free slot
2336 * available for the HBQ it will return pointer to the next available
2337 * HBQ entry else it will return NULL.
2338 **/
2339 static struct lpfc_hbq_entry *
2340 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
2341 {
2342 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2343
2344 lockdep_assert_held(&phba->hbalock);
2345
2346 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
2347 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
2348 hbqp->next_hbqPutIdx = 0;
2349
2350 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
2351 uint32_t raw_index = phba->hbq_get[hbqno];
2352 uint32_t getidx = le32_to_cpu(raw_index);
2353
2354 hbqp->local_hbqGetIdx = getidx;
2355
2356 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
2357 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2358 "1802 HBQ %d: local_hbqGetIdx "
2359 "%u is > than hbqp->entry_count %u\n",
2360 hbqno, hbqp->local_hbqGetIdx,
2361 hbqp->entry_count);
2362
2363 phba->link_state = LPFC_HBA_ERROR;
2364 return NULL;
2365 }
2366
2367 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
2368 return NULL;
2369 }
2370
2371 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
2372 hbqp->hbqPutIdx;
2373 }
2374
2375 /**
2376 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
2377 * @phba: Pointer to HBA context object.
2378 *
2379 * This function is called with no lock held to free all the
2380 * hbq buffers while uninitializing the SLI interface. It also
2381 * frees the HBQ buffers returned by the firmware but not yet
2382 * processed by the upper layers.
2383 **/
2384 void
2385 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
2386 {
2387 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2388 struct hbq_dmabuf *hbq_buf;
2389 unsigned long flags;
2390 int i, hbq_count;
2391
2392 hbq_count = lpfc_sli_hbq_count();
2393 /* Return all memory used by all HBQs */
2394 spin_lock_irqsave(&phba->hbalock, flags);
2395 for (i = 0; i < hbq_count; ++i) {
2396 list_for_each_entry_safe(dmabuf, next_dmabuf,
2397 &phba->hbqs[i].hbq_buffer_list, list) {
2398 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
2399 list_del(&hbq_buf->dbuf.list);
2400 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
2401 }
2402 phba->hbqs[i].buffer_count = 0;
2403 }
2404
2405 /* Mark the HBQs not in use */
2406 phba->hbq_in_use = 0;
2407 spin_unlock_irqrestore(&phba->hbalock, flags);
2408 }
2409
2410 /**
2411 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
2412 * @phba: Pointer to HBA context object.
2413 * @hbqno: HBQ number.
2414 * @hbq_buf: Pointer to HBQ buffer.
2415 *
2416 * This function is called with the hbalock held to post a
2417 * hbq buffer to the firmware. If the function finds an empty
2418 * slot in the HBQ, it will post the buffer. The function returns
2419 * zero if it successfully posts the buffer, else it returns an
2420 * error.
2421 **/
2422 static int
2423 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2424 struct hbq_dmabuf *hbq_buf)
2425 {
2426 lockdep_assert_held(&phba->hbalock);
2427 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2428 }
2429
2430 /**
2431 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2432 * @phba: Pointer to HBA context object.
2433 * @hbqno: HBQ number.
2434 * @hbq_buf: Pointer to HBQ buffer.
2435 *
2436 * This function is called with the hbalock held to post a hbq buffer to the
2437 * firmware. If the function finds an empty slot in the HBQ, it will post the
2438 * buffer and place it on the hbq_buffer_list. The function will return zero if
2439 * it successfully posts the buffer, else it will return an error.
2440 **/
2441 static int
2442 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2443 struct hbq_dmabuf *hbq_buf)
2444 {
2445 struct lpfc_hbq_entry *hbqe;
2446 dma_addr_t physaddr = hbq_buf->dbuf.phys;
2447
2448 lockdep_assert_held(&phba->hbalock);
2449 /* Get next HBQ entry slot to use */
2450 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2451 if (hbqe) {
2452 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2453
2454 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2455 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
2456 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2457 hbqe->bde.tus.f.bdeFlags = 0;
2458 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2459 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2460 /* Sync SLIM */
2461 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2462 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2463 /* flush */
2464 readl(phba->hbq_put + hbqno);
2465 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2466 return 0;
2467 } else
2468 return -ENOMEM;
2469 }
2470
2471 /**
2472 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2473 * @phba: Pointer to HBA context object.
2474 * @hbqno: HBQ number.
2475 * @hbq_buf: Pointer to HBQ buffer.
2476 *
2477 * This function is called with the hbalock held to post an RQE to the SLI4
2478 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2479 * the hbq_buffer_list and return zero, otherwise it will return an error.
2480 **/
2481 static int
2482 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2483 struct hbq_dmabuf *hbq_buf)
2484 {
2485 int rc;
2486 struct lpfc_rqe hrqe;
2487 struct lpfc_rqe drqe;
2488 struct lpfc_queue *hrq;
2489 struct lpfc_queue *drq;
2490
2491 if (hbqno != LPFC_ELS_HBQ)
2492 return 1;
2493 hrq = phba->sli4_hba.hdr_rq;
2494 drq = phba->sli4_hba.dat_rq;
2495
2496 lockdep_assert_held(&phba->hbalock);
2497 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2498 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2499 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2500 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2501 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2502 if (rc < 0)
2503 return rc;
2504 hbq_buf->tag = (rc | (hbqno << 16));
2505 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2506 return 0;
2507 }
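/*
 * Worked example of the buffer tag encoding above (illustrative
 * values; LPFC_ELS_HBQ is assumed to be 0 here). The RQE index from
 * lpfc_sli4_rq_put() occupies the low 16 bits and the HBQ number the
 * high 16 bits:
 *
 *	rc = 0x002a, hbqno = LPFC_ELS_HBQ (0)
 *	tag   = 0x002a | (0 << 16) = 0x0000002a
 *	hbqno = tag >> 16          = 0   (as decoded in lpfc_sli_hbqbuf_find())
 */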
2508
2509 /* HBQ for ELS and CT traffic. */
2510 static struct lpfc_hbq_init lpfc_els_hbq = {
2511 .rn = 1,
2512 .entry_count = 256,
2513 .mask_count = 0,
2514 .profile = 0,
2515 .ring_mask = (1 << LPFC_ELS_RING),
2516 .buffer_count = 0,
2517 .init_count = 40,
2518 .add_count = 40,
2519 };
2520
2521 /* Array of HBQs */
2522 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2523 &lpfc_els_hbq,
2524 };
2525
2526 /**
2527 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2528 * @phba: Pointer to HBA context object.
2529 * @hbqno: HBQ number.
2530 * @count: Number of HBQ buffers to be posted.
2531 *
2532 * This function is called with no lock held to post more hbq buffers to the
2533 * given HBQ. The function returns the number of HBQ buffers successfully
2534 * posted.
2535 **/
2536 static int
2537 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2538 {
2539 uint32_t i, posted = 0;
2540 unsigned long flags;
2541 struct hbq_dmabuf *hbq_buffer;
2542 LIST_HEAD(hbq_buf_list);
2543 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2544 return 0;
2545
2546 if ((phba->hbqs[hbqno].buffer_count + count) >
2547 lpfc_hbq_defs[hbqno]->entry_count)
2548 count = lpfc_hbq_defs[hbqno]->entry_count -
2549 phba->hbqs[hbqno].buffer_count;
2550 if (!count)
2551 return 0;
2552 /* Allocate HBQ entries */
2553 for (i = 0; i < count; i++) {
2554 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2555 if (!hbq_buffer)
2556 break;
2557 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2558 }
2559 /* Check whether HBQ is still in use */
2560 spin_lock_irqsave(&phba->hbalock, flags);
2561 if (!phba->hbq_in_use)
2562 goto err;
2563 while (!list_empty(&hbq_buf_list)) {
2564 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2565 dbuf.list);
2566 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2567 (hbqno << 16));
2568 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2569 phba->hbqs[hbqno].buffer_count++;
2570 posted++;
2571 } else
2572 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2573 }
2574 spin_unlock_irqrestore(&phba->hbalock, flags);
2575 return posted;
2576 err:
2577 spin_unlock_irqrestore(&phba->hbalock, flags);
2578 while (!list_empty(&hbq_buf_list)) {
2579 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2580 dbuf.list);
2581 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2582 }
2583 return 0;
2584 }
2585
2586 /**
2587 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2588 * @phba: Pointer to HBA context object.
2589 * @qno: HBQ number.
2590 *
2591 * This function posts more buffers to the HBQ. This function
2592 * is called with no lock held. The function returns the number of HBQ entries
2593 * successfully posted.
2594 **/
2595 int
2596 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2597 {
2598 if (phba->sli_rev == LPFC_SLI_REV4)
2599 return 0;
2600 else
2601 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2602 lpfc_hbq_defs[qno]->add_count);
2603 }
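/*
 * Illustrative sketch (not part of this file's code): replenishing the
 * ELS HBQ after consuming buffers.
 *
 *	posted = lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
 *	// posted is 0 on SLI4 ports (receive buffers go through the RQ
 *	// instead), when the HBQ is already full, or when no buffers
 *	// could be allocated.
 */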
2604
2605 /**
2606 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2607 * @phba: Pointer to HBA context object.
2608 * @qno: HBQ queue number.
2609 *
2610 * This function is called from SLI initialization code path with
2611 * no lock held to post initial HBQ buffers to firmware. The
2612 * function returns the number of HBQ entries successfully posted.
2613 **/
2614 static int
2615 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2616 {
2617 if (phba->sli_rev == LPFC_SLI_REV4)
2618 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2619 lpfc_hbq_defs[qno]->entry_count);
2620 else
2621 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2622 lpfc_hbq_defs[qno]->init_count);
2623 }
2624
2625 /**
2626 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
 * @rb_list: Pointer to the hbq buffer list.
2627 *
2628 * This function removes the first hbq buffer on an hbq list and returns a
2629 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2630 **/
2631 static struct hbq_dmabuf *
2632 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2633 {
2634 struct lpfc_dmabuf *d_buf;
2635
2636 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2637 if (!d_buf)
2638 return NULL;
2639 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2640 }
2641
2642 /**
2643 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2644 * @phba: Pointer to HBA context object.
2645 * @hrq: Pointer to the header receive queue.
2646 *
2647 * This function removes the first RQ buffer on an RQ buffer list and returns a
2648 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2649 **/
2650 static struct rqb_dmabuf *
2651 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2652 {
2653 struct lpfc_dmabuf *h_buf;
2654 struct lpfc_rqb *rqbp;
2655
2656 rqbp = hrq->rqbp;
2657 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2658 struct lpfc_dmabuf, list);
2659 if (!h_buf)
2660 return NULL;
2661 rqbp->buffer_count--;
2662 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2663 }
2664
2665 /**
2666 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2667 * @phba: Pointer to HBA context object.
2668 * @tag: Tag of the hbq buffer.
2669 *
2670 * This function searches for the hbq buffer associated with the given tag in
2671 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2672 * otherwise it returns NULL.
2673 **/
2674 static struct hbq_dmabuf *
2675 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2676 {
2677 struct lpfc_dmabuf *d_buf;
2678 struct hbq_dmabuf *hbq_buf;
2679 uint32_t hbqno;
2680
2681 hbqno = tag >> 16;
2682 if (hbqno >= LPFC_MAX_HBQS)
2683 return NULL;
2684
2685 spin_lock_irq(&phba->hbalock);
2686 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2687 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2688 if (hbq_buf->tag == tag) {
2689 spin_unlock_irq(&phba->hbalock);
2690 return hbq_buf;
2691 }
2692 }
2693 spin_unlock_irq(&phba->hbalock);
2694 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2695 "1803 Bad hbq tag. Data: x%x x%x\n",
2696 tag, phba->hbqs[tag >> 16].buffer_count);
2697 return NULL;
2698 }
2699
2700 /**
2701 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2702 * @phba: Pointer to HBA context object.
2703 * @hbq_buffer: Pointer to HBQ buffer.
2704 *
2705 * This function is called with the hbalock held. It gives the
2706 * hbq buffer back to the firmware. If the HBQ does not have space to
2707 * post the buffer, it will free the buffer.
2708 **/
2709 void
2710 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2711 {
2712 uint32_t hbqno;
2713
2714 if (hbq_buffer) {
2715 hbqno = hbq_buffer->tag >> 16;
2716 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2717 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2718 }
2719 }
2720
2721 /**
2722 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2723 * @mbxCommand: mailbox command code.
2724 *
2725 * This function is called by the mailbox event handler function to verify
2726 * that the completed mailbox command is a legitimate mailbox command. If the
2727 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2728 * and the mailbox event handler will take the HBA offline.
2729 **/
2730 static int
2731 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2732 {
2733 uint8_t ret;
2734
2735 switch (mbxCommand) {
2736 case MBX_LOAD_SM:
2737 case MBX_READ_NV:
2738 case MBX_WRITE_NV:
2739 case MBX_WRITE_VPARMS:
2740 case MBX_RUN_BIU_DIAG:
2741 case MBX_INIT_LINK:
2742 case MBX_DOWN_LINK:
2743 case MBX_CONFIG_LINK:
2744 case MBX_CONFIG_RING:
2745 case MBX_RESET_RING:
2746 case MBX_READ_CONFIG:
2747 case MBX_READ_RCONFIG:
2748 case MBX_READ_SPARM:
2749 case MBX_READ_STATUS:
2750 case MBX_READ_RPI:
2751 case MBX_READ_XRI:
2752 case MBX_READ_REV:
2753 case MBX_READ_LNK_STAT:
2754 case MBX_REG_LOGIN:
2755 case MBX_UNREG_LOGIN:
2756 case MBX_CLEAR_LA:
2757 case MBX_DUMP_MEMORY:
2758 case MBX_DUMP_CONTEXT:
2759 case MBX_RUN_DIAGS:
2760 case MBX_RESTART:
2761 case MBX_UPDATE_CFG:
2762 case MBX_DOWN_LOAD:
2763 case MBX_DEL_LD_ENTRY:
2764 case MBX_RUN_PROGRAM:
2765 case MBX_SET_MASK:
2766 case MBX_SET_VARIABLE:
2767 case MBX_UNREG_D_ID:
2768 case MBX_KILL_BOARD:
2769 case MBX_CONFIG_FARP:
2770 case MBX_BEACON:
2771 case MBX_LOAD_AREA:
2772 case MBX_RUN_BIU_DIAG64:
2773 case MBX_CONFIG_PORT:
2774 case MBX_READ_SPARM64:
2775 case MBX_READ_RPI64:
2776 case MBX_REG_LOGIN64:
2777 case MBX_READ_TOPOLOGY:
2778 case MBX_WRITE_WWN:
2779 case MBX_SET_DEBUG:
2780 case MBX_LOAD_EXP_ROM:
2781 case MBX_ASYNCEVT_ENABLE:
2782 case MBX_REG_VPI:
2783 case MBX_UNREG_VPI:
2784 case MBX_HEARTBEAT:
2785 case MBX_PORT_CAPABILITIES:
2786 case MBX_PORT_IOV_CONTROL:
2787 case MBX_SLI4_CONFIG:
2788 case MBX_SLI4_REQ_FTRS:
2789 case MBX_REG_FCFI:
2790 case MBX_UNREG_FCFI:
2791 case MBX_REG_VFI:
2792 case MBX_UNREG_VFI:
2793 case MBX_INIT_VPI:
2794 case MBX_INIT_VFI:
2795 case MBX_RESUME_RPI:
2796 case MBX_READ_EVENT_LOG_STATUS:
2797 case MBX_READ_EVENT_LOG:
2798 case MBX_SECURITY_MGMT:
2799 case MBX_AUTH_PORT:
2800 case MBX_ACCESS_VDATA:
2801 ret = mbxCommand;
2802 break;
2803 default:
2804 ret = MBX_SHUTDOWN;
2805 break;
2806 }
2807 return ret;
2808 }
2809
2810 /**
2811 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2812 * @phba: Pointer to HBA context object.
2813 * @pmboxq: Pointer to mailbox command.
2814 *
2815 * This is completion handler function for mailbox commands issued from
2816 * lpfc_sli_issue_mbox_wait function. This function is called by the
2817 * mailbox event handler function with no lock held. This function
2818 * will wake up the thread waiting on the completion pointed to by the
2819 * context3 field of the mailbox.
2820 **/
2821 void
2822 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2823 {
2824 unsigned long drvr_flag;
2825 struct completion *pmbox_done;
2826
2827 /*
2828 * If pmbox_done is empty, the driver thread gave up waiting and
2829 * continued running.
2830 */
2831 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2832 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2833 pmbox_done = (struct completion *)pmboxq->context3;
2834 if (pmbox_done)
2835 complete(pmbox_done);
2836 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2837 return;
2838 }
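/*
 * Illustrative sketch (not part of this file's code): the waiter side
 * that pairs with this wake-up, modeled on lpfc_sli_issue_mbox_wait().
 * The timeout value is an assumption for the example.
 *
 *	DECLARE_COMPLETION_ONSTACK(mbox_done);
 *
 *	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
 *	pmboxq->context3 = &mbox_done;
 *	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *	if (retval == MBX_BUSY || retval == MBX_SUCCESS)
 *		wait_for_completion_timeout(&mbox_done,
 *					    msecs_to_jiffies(timeout * 1000));
 */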
2839
2840 static void
2841 __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2842 {
2843 unsigned long iflags;
2844
2845 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2846 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2847 spin_lock_irqsave(&ndlp->lock, iflags);
2848 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2849 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2850 spin_unlock_irqrestore(&ndlp->lock, iflags);
2851 }
2852 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2853 }
2854
2855 void
2856 lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2857 {
2858 __lpfc_sli_rpi_release(vport, ndlp);
2859 }
2860
2861 /**
2862 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2863 * @phba: Pointer to HBA context object.
2864 * @pmb: Pointer to mailbox object.
2865 *
2866 * This function is the default mailbox completion handler. It
2867 * frees the memory resources associated with the completed mailbox
2868 * command. If the completed command is a REG_LOGIN mailbox command,
2869 * this function will issue a UREG_LOGIN to re-claim the RPI.
2870 **/
2871 void
2872 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2873 {
2874 struct lpfc_vport *vport = pmb->vport;
2875 struct lpfc_dmabuf *mp;
2876 struct lpfc_nodelist *ndlp;
2877 struct Scsi_Host *shost;
2878 uint16_t rpi, vpi;
2879 int rc;
2880
2881 /*
2882 * If a REG_LOGIN succeeded after the node was destroyed or the node
2883 * is in re-discovery, the driver needs to clean up the RPI.
2884 */
2885 if (!(phba->pport->load_flag & FC_UNLOADING) &&
2886 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2887 !pmb->u.mb.mbxStatus) {
2888 mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
2889 if (mp) {
2890 pmb->ctx_buf = NULL;
2891 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2892 kfree(mp);
2893 }
2894 rpi = pmb->u.mb.un.varWords[0];
2895 vpi = pmb->u.mb.un.varRegLogin.vpi;
2896 if (phba->sli_rev == LPFC_SLI_REV4)
2897 vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2898 lpfc_unreg_login(phba, vpi, rpi, pmb);
2899 pmb->vport = vport;
2900 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2901 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2902 if (rc != MBX_NOT_FINISHED)
2903 return;
2904 }
2905
2906 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2907 !(phba->pport->load_flag & FC_UNLOADING) &&
2908 !pmb->u.mb.mbxStatus) {
2909 shost = lpfc_shost_from_vport(vport);
2910 spin_lock_irq(shost->host_lock);
2911 vport->vpi_state |= LPFC_VPI_REGISTERED;
2912 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2913 spin_unlock_irq(shost->host_lock);
2914 }
2915
2916 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2917 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2918 lpfc_nlp_put(ndlp);
2919 }
2920
2921 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2922 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2923
2924 /* Check to see if there are any deferred events to process */
2925 if (ndlp) {
2926 lpfc_printf_vlog(
2927 vport,
2928 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2929 "1438 UNREG cmpl deferred mbox x%x "
2930 "on NPort x%x Data: x%x x%x x%px x%x x%x\n",
2931 ndlp->nlp_rpi, ndlp->nlp_DID,
2932 ndlp->nlp_flag, ndlp->nlp_defer_did,
2933 ndlp, vport->load_flag, kref_read(&ndlp->kref));
2934
2935 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2936 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2937 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2938 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2939 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2940 } else {
2941 __lpfc_sli_rpi_release(vport, ndlp);
2942 }
2943
2944 /* The unreg_login mailbox is complete and had a
2945 * reference that has to be released. The PLOGI
2946 * got its own ref.
2947 */
2948 lpfc_nlp_put(ndlp);
2949 pmb->ctx_ndlp = NULL;
2950 }
2951 }
2952
2953 /* This nlp_put pairs with lpfc_sli4_resume_rpi */
2954 if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
2955 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2956 lpfc_nlp_put(ndlp);
2957 }
2958
2959 /* Check security permission status on INIT_LINK mailbox command */
2960 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2961 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2962 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2963 "2860 SLI authentication is required "
2964 "for INIT_LINK but has not done yet\n");
2965
2966 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2967 lpfc_sli4_mbox_cmd_free(phba, pmb);
2968 else
2969 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
2970 }
2971 /**
2972 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2973 * @phba: Pointer to HBA context object.
2974 * @pmb: Pointer to mailbox object.
2975 *
2976 * This function is the unreg rpi mailbox completion handler. It
2977 * frees the memory resources associated with the completed mailbox
2978 * command. An additional reference was put on the ndlp to prevent
2979 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2980 * the unreg mailbox command completes; this routine puts that
2981 * reference back.
2982 *
2983 **/
2984 void
2985 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2986 {
2987 struct lpfc_vport *vport = pmb->vport;
2988 struct lpfc_nodelist *ndlp;
2989
2990 ndlp = pmb->ctx_ndlp;
2991 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2992 if (phba->sli_rev == LPFC_SLI_REV4 &&
2993 (bf_get(lpfc_sli_intf_if_type,
2994 &phba->sli4_hba.sli_intf) >=
2995 LPFC_SLI_INTF_IF_TYPE_2)) {
2996 if (ndlp) {
2997 lpfc_printf_vlog(
2998 vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2999 "0010 UNREG_LOGIN vpi:%x "
3000 "rpi:%x DID:%x defer x%x flg x%x "
3001 "x%px\n",
3002 vport->vpi, ndlp->nlp_rpi,
3003 ndlp->nlp_DID, ndlp->nlp_defer_did,
3004 ndlp->nlp_flag,
3005 ndlp);
3006 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
3007
3008 /* Check to see if there are any deferred
3009 * events to process
3010 */
3011 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
3012 (ndlp->nlp_defer_did !=
3013 NLP_EVT_NOTHING_PENDING)) {
3014 lpfc_printf_vlog(
3015 vport, KERN_INFO, LOG_DISCOVERY,
3016 "4111 UNREG cmpl deferred "
3017 "clr x%x on "
3018 "NPort x%x Data: x%x x%px\n",
3019 ndlp->nlp_rpi, ndlp->nlp_DID,
3020 ndlp->nlp_defer_did, ndlp);
3021 ndlp->nlp_flag &= ~NLP_UNREG_INP;
3022 ndlp->nlp_defer_did =
3023 NLP_EVT_NOTHING_PENDING;
3024 lpfc_issue_els_plogi(
3025 vport, ndlp->nlp_DID, 0);
3026 } else {
3027 __lpfc_sli_rpi_release(vport, ndlp);
3028 }
3029 lpfc_nlp_put(ndlp);
3030 }
3031 }
3032 }
3033
3034 mempool_free(pmb, phba->mbox_mem_pool);
3035 }
3036
3037 /**
3038 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
3039 * @phba: Pointer to HBA context object.
3040 *
3041 * This function is called with no lock held. This function processes all
3042 * the completed mailbox commands and gives it to upper layers. The interrupt
3043 * service routine processes mailbox completion interrupt and adds completed
3044 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
3045 * The worker thread calls lpfc_sli_handle_mb_event, which returns the
3046 * completed mailbox commands in the mboxq_cmpl queue to the upper layers. This
3047 * function returns the mailbox commands to the upper layer by calling the
3048 * completion handler function of each mailbox.
3049 **/
3050 int
3051 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
3052 {
3053 MAILBOX_t *pmbox;
3054 LPFC_MBOXQ_t *pmb;
3055 int rc;
3056 LIST_HEAD(cmplq);
3057
3058 phba->sli.slistat.mbox_event++;
3059
3060 /* Get all completed mailbox buffers into the cmplq */
3061 spin_lock_irq(&phba->hbalock);
3062 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
3063 spin_unlock_irq(&phba->hbalock);
3064
3065 /* Get a Mailbox buffer to setup mailbox commands for callback */
3066 do {
3067 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
3068 if (pmb == NULL)
3069 break;
3070
3071 pmbox = &pmb->u.mb;
3072
3073 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
3074 if (pmb->vport) {
3075 lpfc_debugfs_disc_trc(pmb->vport,
3076 LPFC_DISC_TRC_MBOX_VPORT,
3077 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
3078 (uint32_t)pmbox->mbxCommand,
3079 pmbox->un.varWords[0],
3080 pmbox->un.varWords[1]);
3081 } else {
3083 lpfc_debugfs_disc_trc(phba->pport,
3084 LPFC_DISC_TRC_MBOX,
3085 "MBOX cmpl: cmd:x%x mb:x%x x%x",
3086 (uint32_t)pmbox->mbxCommand,
3087 pmbox->un.varWords[0],
3088 pmbox->un.varWords[1]);
3089 }
3090 }
3091
3092 /*
3093 * It is a fatal error if an unknown mailbox command completes.
3094 */
3095 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
3096 MBX_SHUTDOWN) {
3097 /* Unknown mailbox command compl */
3098 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3099 "(%d):0323 Unknown Mailbox command "
3100 "x%x (x%x/x%x) Cmpl\n",
3101 pmb->vport ? pmb->vport->vpi :
3102 LPFC_VPORT_UNKNOWN,
3103 pmbox->mbxCommand,
3104 lpfc_sli_config_mbox_subsys_get(phba,
3105 pmb),
3106 lpfc_sli_config_mbox_opcode_get(phba,
3107 pmb));
3108 phba->link_state = LPFC_HBA_ERROR;
3109 phba->work_hs = HS_FFER3;
3110 lpfc_handle_eratt(phba);
3111 continue;
3112 }
3113
3114 if (pmbox->mbxStatus) {
3115 phba->sli.slistat.mbox_stat_err++;
3116 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
3117 /* Mbox cmd cmpl error - RETRYing */
3118 lpfc_printf_log(phba, KERN_INFO,
3119 LOG_MBOX | LOG_SLI,
3120 "(%d):0305 Mbox cmd cmpl "
3121 "error - RETRYing Data: x%x "
3122 "(x%x/x%x) x%x x%x x%x\n",
3123 pmb->vport ? pmb->vport->vpi :
3124 LPFC_VPORT_UNKNOWN,
3125 pmbox->mbxCommand,
3126 lpfc_sli_config_mbox_subsys_get(phba,
3127 pmb),
3128 lpfc_sli_config_mbox_opcode_get(phba,
3129 pmb),
3130 pmbox->mbxStatus,
3131 pmbox->un.varWords[0],
3132 pmb->vport ? pmb->vport->port_state :
3133 LPFC_VPORT_UNKNOWN);
3134 pmbox->mbxStatus = 0;
3135 pmbox->mbxOwner = OWN_HOST;
3136 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3137 if (rc != MBX_NOT_FINISHED)
3138 continue;
3139 }
3140 }
3141
3142 /* Mailbox cmd <cmd> Cmpl <cmpl> */
3143 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
3144 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
3145 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
3146 "x%x x%x x%x\n",
3147 pmb->vport ? pmb->vport->vpi : 0,
3148 pmbox->mbxCommand,
3149 lpfc_sli_config_mbox_subsys_get(phba, pmb),
3150 lpfc_sli_config_mbox_opcode_get(phba, pmb),
3151 pmb->mbox_cmpl,
3152 *((uint32_t *) pmbox),
3153 pmbox->un.varWords[0],
3154 pmbox->un.varWords[1],
3155 pmbox->un.varWords[2],
3156 pmbox->un.varWords[3],
3157 pmbox->un.varWords[4],
3158 pmbox->un.varWords[5],
3159 pmbox->un.varWords[6],
3160 pmbox->un.varWords[7],
3161 pmbox->un.varWords[8],
3162 pmbox->un.varWords[9],
3163 pmbox->un.varWords[10]);
3164
3165 if (pmb->mbox_cmpl)
3166 pmb->mbox_cmpl(phba, pmb);
3167 } while (1);
3168 return 0;
3169 }
3170
3171 /**
3172 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
3173 * @phba: Pointer to HBA context object.
3174 * @pring: Pointer to driver SLI ring object.
3175 * @tag: buffer tag.
3176 *
3177 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
3178 * is set in the tag, the buffer was posted for a particular exchange and
3179 * the function will return the buffer without replacing it.
3180 * If the buffer is for unsolicited ELS or CT traffic, this function
3181 * returns the buffer and also posts another buffer to the firmware.
3182 **/
3183 static struct lpfc_dmabuf *
3184 lpfc_sli_get_buff(struct lpfc_hba *phba,
3185 struct lpfc_sli_ring *pring,
3186 uint32_t tag)
3187 {
3188 struct hbq_dmabuf *hbq_entry;
3189
3190 if (tag & QUE_BUFTAG_BIT)
3191 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
3192 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
3193 if (!hbq_entry)
3194 return NULL;
3195 return &hbq_entry->dbuf;
3196 }
3197
3198 /**
3199 * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
3200 * containing an NVME LS request.
3201 * @phba: pointer to lpfc hba data structure.
3202 * @piocb: pointer to the iocbq struct representing the sequence starting
3203 * frame.
3204 *
3205 * This routine initially validates the NVME LS, validates there is a login
3206 * with the port that sent the LS, and then calls the appropriate nvme host
3207 * or target LS request handler.
3208 **/
3209 static void
3210 lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
3211 {
3212 struct lpfc_nodelist *ndlp;
3213 struct lpfc_dmabuf *d_buf;
3214 struct hbq_dmabuf *nvmebuf;
3215 struct fc_frame_header *fc_hdr;
3216 struct lpfc_async_xchg_ctx *axchg = NULL;
3217 char *failwhy = NULL;
3218 uint32_t oxid, sid, did, fctl, size;
3219 int ret = 1;
3220
3221 d_buf = piocb->cmd_dmabuf;
3222
3223 nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
3224 fc_hdr = nvmebuf->hbuf.virt;
3225 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
3226 sid = sli4_sid_from_fc_hdr(fc_hdr);
3227 did = sli4_did_from_fc_hdr(fc_hdr);
3228 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
3229 fc_hdr->fh_f_ctl[1] << 8 |
3230 fc_hdr->fh_f_ctl[2]);
3231 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
3232
3233 lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n",
3234 oxid, size, sid);
3235
3236 if (phba->pport->load_flag & FC_UNLOADING) {
3237 failwhy = "Driver Unloading";
3238 } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
3239 failwhy = "NVME FC4 Disabled";
3240 } else if (!phba->nvmet_support && !phba->pport->localport) {
3241 failwhy = "No Localport";
3242 } else if (phba->nvmet_support && !phba->targetport) {
3243 failwhy = "No Targetport";
3244 } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
3245 failwhy = "Bad NVME LS R_CTL";
3246 } else if (unlikely((fctl & 0x00FF0000) !=
3247 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
3248 failwhy = "Bad NVME LS F_CTL";
3249 } else {
3250 axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
3251 if (!axchg)
3252 failwhy = "No CTX memory";
3253 }
3254
3255 if (unlikely(failwhy)) {
3256 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3257 "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
3258 sid, oxid, failwhy);
3259 goto out_fail;
3260 }
3261
3262 /* validate the source of the LS is logged in */
3263 ndlp = lpfc_findnode_did(phba->pport, sid);
3264 if (!ndlp ||
3265 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3266 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3267 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
3268 "6216 NVME Unsol rcv: No ndlp: "
3269 "NPort_ID x%x oxid x%x\n",
3270 sid, oxid);
3271 goto out_fail;
3272 }
3273
3274 axchg->phba = phba;
3275 axchg->ndlp = ndlp;
3276 axchg->size = size;
3277 axchg->oxid = oxid;
3278 axchg->sid = sid;
3279 axchg->wqeq = NULL;
3280 axchg->state = LPFC_NVME_STE_LS_RCV;
3281 axchg->entry_cnt = 1;
3282 axchg->rqb_buffer = (void *)nvmebuf;
3283 axchg->hdwq = &phba->sli4_hba.hdwq[0];
3284 axchg->payload = nvmebuf->dbuf.virt;
3285 INIT_LIST_HEAD(&axchg->list);
3286
3287 if (phba->nvmet_support) {
3288 ret = lpfc_nvmet_handle_lsreq(phba, axchg);
3289 spin_lock_irq(&ndlp->lock);
3290 if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
3291 ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
3292 spin_unlock_irq(&ndlp->lock);
3293
3294 /* This reference is a single occurrence to hold the
3295 * node valid until the nvmet transport calls
3296 * host_release.
3297 */
3298 if (!lpfc_nlp_get(ndlp))
3299 goto out_fail;
3300
3301 lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
3302 "6206 NVMET unsol ls_req ndlp x%px "
3303 "DID x%x xflags x%x refcnt %d\n",
3304 ndlp, ndlp->nlp_DID,
3305 ndlp->fc4_xpt_flags,
3306 kref_read(&ndlp->kref));
3307 } else {
3308 spin_unlock_irq(&ndlp->lock);
3309 }
3310 } else {
3311 ret = lpfc_nvme_handle_lsreq(phba, axchg);
3312 }
3313
3314 /* if zero, LS was successfully handled. If non-zero, LS not handled */
3315 if (!ret)
3316 return;
3317
3318 out_fail:
3319 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3320 "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
3321 "NVMe%s handler failed %d\n",
3322 did, sid, oxid,
3323 (phba->nvmet_support) ? "T" : "I", ret);
3324
3325 /* recycle receive buffer */
3326 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
3327
3328 /* If start of new exchange, abort it */
3329 if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
3330 ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
3331
3332 if (ret)
3333 kfree(axchg);
3334 }
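
/*
 * Illustrative sketch (editorial addition, not part of the driver): the
 * F_CTL validation from lpfc_nvme_unsol_ls_handler() in isolation. An NVME
 * LS must arrive as a complete single-sequence exchange, i.e. with
 * FIRST_SEQ, END_SEQ and SEQ_INIT all set in the top F_CTL byte. The
 * helper name is hypothetical.
 */
static __maybe_unused bool
lpfc_nvme_ls_fctl_ok_example(struct fc_frame_header *fc_hdr)
{
	uint32_t fctl = (fc_hdr->fh_f_ctl[0] << 16 |
			 fc_hdr->fh_f_ctl[1] << 8 |
			 fc_hdr->fh_f_ctl[2]);

	/* Only the top byte carries the sequence/exchange control bits */
	return (fctl & 0x00FF0000) ==
	       (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT);
}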
3335
3336 /**
3337 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
3338 * @phba: Pointer to HBA context object.
3339 * @pring: Pointer to driver SLI ring object.
3340 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
3341 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
3342 * @fch_type: the type for the first frame of the sequence.
3343 *
3344 * This function is called with no lock held. This function uses the r_ctl and
3345 * type of the received sequence to find the correct callback function to call
3346 * to process the sequence.
3347 **/
3348 static int
3349 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3350 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
3351 uint32_t fch_type)
3352 {
3353 int i;
3354
3355 switch (fch_type) {
3356 case FC_TYPE_NVME:
3357 lpfc_nvme_unsol_ls_handler(phba, saveq);
3358 return 1;
3359 default:
3360 break;
3361 }
3362
3363 /* Unsolicited Responses */
3364 if (pring->prt[0].profile) {
3365 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
3366 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
3367 saveq);
3368 return 1;
3369 }
3370 /* We must search, based on rctl / type
3371 for the right routine */
3372 for (i = 0; i < pring->num_mask; i++) {
3373 if ((pring->prt[i].rctl == fch_r_ctl) &&
3374 (pring->prt[i].type == fch_type)) {
3375 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
3376 (pring->prt[i].lpfc_sli_rcv_unsol_event)
3377 (phba, pring, saveq);
3378 return 1;
3379 }
3380 }
3381 return 0;
3382 }
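
/*
 * Illustrative sketch (editorial addition, not part of the driver): the
 * rctl/type match performed by lpfc_complete_unsol_iocb() in isolation.
 * Each ring carries a small table of (rctl, type, handler) entries in
 * pring->prt[]; the first entry matching the received frame wins. The
 * helper name is hypothetical.
 */
static __maybe_unused int
lpfc_unsol_mask_match_example(struct lpfc_sli_ring *pring,
			      uint32_t fch_r_ctl, uint32_t fch_type)
{
	int i;

	for (i = 0; i < pring->num_mask; i++) {
		if (pring->prt[i].rctl == fch_r_ctl &&
		    pring->prt[i].type == fch_type)
			return i;	/* index of the matching handler */
	}
	return -1;			/* no registered handler */
}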
3383
3384 static void
3385 lpfc_sli_prep_unsol_wqe(struct lpfc_hba *phba,
3386 struct lpfc_iocbq *saveq)
3387 {
3388 IOCB_t *irsp;
3389 union lpfc_wqe128 *wqe;
3390 u16 i = 0;
3391
3392 irsp = &saveq->iocb;
3393 wqe = &saveq->wqe;
3394
3395 /* Fill wcqe with the IOCB status fields */
3396 bf_set(lpfc_wcqe_c_status, &saveq->wcqe_cmpl, irsp->ulpStatus);
3397 saveq->wcqe_cmpl.word3 = irsp->ulpBdeCount;
3398 saveq->wcqe_cmpl.parameter = irsp->un.ulpWord[4];
3399 saveq->wcqe_cmpl.total_data_placed = irsp->unsli3.rcvsli3.acc_len;
3400
3401 /* Source ID */
3402 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, irsp->un.rcvels.parmRo);
3403
3404 /* rx-id of the response frame */
3405 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, irsp->ulpContext);
3406
3407 /* ox-id of the frame */
3408 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
3409 irsp->unsli3.rcvsli3.ox_id);
3410
3411 /* DID */
3412 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
3413 irsp->un.rcvels.remoteID);
3414
3415 /* unsol data len */
3416 for (i = 0; i < irsp->ulpBdeCount; i++) {
3417 struct lpfc_hbq_entry *hbqe = NULL;
3418
3419 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3420 if (i == 0) {
3421 hbqe = (struct lpfc_hbq_entry *)
3422 &irsp->un.ulpWord[0];
3423 saveq->wqe.gen_req.bde.tus.f.bdeSize =
3424 hbqe->bde.tus.f.bdeSize;
3425 } else if (i == 1) {
3426 hbqe = (struct lpfc_hbq_entry *)
3427 &irsp->unsli3.sli3Words[4];
3428 saveq->unsol_rcv_len = hbqe->bde.tus.f.bdeSize;
3429 }
3430 }
3431 }
3432 }
3433
3434 /**
3435 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
3436 * @phba: Pointer to HBA context object.
3437 * @pring: Pointer to driver SLI ring object.
3438 * @saveq: Pointer to the unsolicited iocb.
3439 *
3440 * This function is called with no lock held by the ring event handler
3441 * when there is an unsolicited iocb posted to the response ring by the
3442 * firmware. This function gets the buffer associated with the iocbs
3443 * and calls the event handler for the ring. This function handles both
3444 * qring buffers and hbq buffers.
3445 * When the function returns 1, the caller can free the iocb object;
3446 * otherwise upper layer functions will free the iocb objects.
3447 **/
3448 static int
3449 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3450 struct lpfc_iocbq *saveq)
3451 {
3452 IOCB_t *irsp;
3453 WORD5 *w5p;
3454 dma_addr_t paddr;
3455 uint32_t Rctl, Type;
3456 struct lpfc_iocbq *iocbq;
3457 struct lpfc_dmabuf *dmzbuf;
3458
3459 irsp = &saveq->iocb;
3460 saveq->vport = phba->pport;
3461
3462 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
3463 if (pring->lpfc_sli_rcv_async_status)
3464 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
3465 else
3466 lpfc_printf_log(phba,
3467 KERN_WARNING,
3468 LOG_SLI,
3469 "0316 Ring %d handler: unexpected "
3470 "ASYNC_STATUS iocb received evt_code "
3471 "0x%x\n",
3472 pring->ringno,
3473 irsp->un.asyncstat.evt_code);
3474 return 1;
3475 }
3476
3477 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
3478 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
3479 if (irsp->ulpBdeCount > 0) {
3480 dmzbuf = lpfc_sli_get_buff(phba, pring,
3481 irsp->un.ulpWord[3]);
3482 lpfc_in_buf_free(phba, dmzbuf);
3483 }
3484
3485 if (irsp->ulpBdeCount > 1) {
3486 dmzbuf = lpfc_sli_get_buff(phba, pring,
3487 irsp->unsli3.sli3Words[3]);
3488 lpfc_in_buf_free(phba, dmzbuf);
3489 }
3490
3491 if (irsp->ulpBdeCount > 2) {
3492 dmzbuf = lpfc_sli_get_buff(phba, pring,
3493 irsp->unsli3.sli3Words[7]);
3494 lpfc_in_buf_free(phba, dmzbuf);
3495 }
3496
3497 return 1;
3498 }
3499
3500 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3501 if (irsp->ulpBdeCount != 0) {
3502 saveq->cmd_dmabuf = lpfc_sli_get_buff(phba, pring,
3503 irsp->un.ulpWord[3]);
3504 if (!saveq->cmd_dmabuf)
3505 lpfc_printf_log(phba,
3506 KERN_ERR,
3507 LOG_SLI,
3508 "0341 Ring %d Cannot find buffer for "
3509 "an unsolicited iocb. tag 0x%x\n",
3510 pring->ringno,
3511 irsp->un.ulpWord[3]);
3512 }
3513 if (irsp->ulpBdeCount == 2) {
3514 saveq->bpl_dmabuf = lpfc_sli_get_buff(phba, pring,
3515 irsp->unsli3.sli3Words[7]);
3516 if (!saveq->bpl_dmabuf)
3517 lpfc_printf_log(phba,
3518 KERN_ERR,
3519 LOG_SLI,
3520 "0342 Ring %d Cannot find buffer for an"
3521 " unsolicited iocb. tag 0x%x\n",
3522 pring->ringno,
3523 irsp->unsli3.sli3Words[7]);
3524 }
3525 list_for_each_entry(iocbq, &saveq->list, list) {
3526 irsp = &iocbq->iocb;
3527 if (irsp->ulpBdeCount != 0) {
3528 iocbq->cmd_dmabuf = lpfc_sli_get_buff(phba,
3529 pring,
3530 irsp->un.ulpWord[3]);
3531 if (!iocbq->cmd_dmabuf)
3532 lpfc_printf_log(phba,
3533 KERN_ERR,
3534 LOG_SLI,
3535 "0343 Ring %d Cannot find "
3536 "buffer for an unsolicited iocb"
3537 ". tag 0x%x\n", pring->ringno,
3538 irsp->un.ulpWord[3]);
3539 }
3540 if (irsp->ulpBdeCount == 2) {
3541 iocbq->bpl_dmabuf = lpfc_sli_get_buff(phba,
3542 pring,
3543 irsp->unsli3.sli3Words[7]);
3544 if (!iocbq->bpl_dmabuf)
3545 lpfc_printf_log(phba,
3546 KERN_ERR,
3547 LOG_SLI,
3548 "0344 Ring %d Cannot find "
3549 "buffer for an unsolicited "
3550 "iocb. tag 0x%x\n",
3551 pring->ringno,
3552 irsp->unsli3.sli3Words[7]);
3553 }
3554 }
3555 } else {
3556 paddr = getPaddr(irsp->un.cont64[0].addrHigh,
3557 irsp->un.cont64[0].addrLow);
3558 saveq->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring,
3559 paddr);
3560 if (irsp->ulpBdeCount == 2) {
3561 paddr = getPaddr(irsp->un.cont64[1].addrHigh,
3562 irsp->un.cont64[1].addrLow);
3563 saveq->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba,
3564 pring,
3565 paddr);
3566 }
3567 }
3568
3569 if (irsp->ulpBdeCount != 0 &&
3570 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
3571 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
3572 int found = 0;
3573
3574 /* search continue save q for same XRI */
3575 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
3576 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
3577 saveq->iocb.unsli3.rcvsli3.ox_id) {
3578 list_add_tail(&saveq->list, &iocbq->list);
3579 found = 1;
3580 break;
3581 }
3582 }
3583 if (!found)
3584 list_add_tail(&saveq->clist,
3585 &pring->iocb_continue_saveq);
3586
3587 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
3588 list_del_init(&iocbq->clist);
3589 saveq = iocbq;
3590 irsp = &saveq->iocb;
3591 } else {
3592 return 0;
3593 }
3594 }
3595 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
3596 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
3597 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
3598 Rctl = FC_RCTL_ELS_REQ;
3599 Type = FC_TYPE_ELS;
3600 } else {
3601 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3602 Rctl = w5p->hcsw.Rctl;
3603 Type = w5p->hcsw.Type;
3604
3605 /* Firmware Workaround */
3606 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3607 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3608 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3609 Rctl = FC_RCTL_ELS_REQ;
3610 Type = FC_TYPE_ELS;
3611 w5p->hcsw.Rctl = Rctl;
3612 w5p->hcsw.Type = Type;
3613 }
3614 }
3615
3616 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3617 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
3618 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3619 if (irsp->unsli3.rcvsli3.vpi == 0xffff)
3620 saveq->vport = phba->pport;
3621 else
3622 saveq->vport = lpfc_find_vport_by_vpid(phba,
3623 irsp->unsli3.rcvsli3.vpi);
3624 }
3625
3626 /* Prepare WQE with Unsol frame */
3627 lpfc_sli_prep_unsol_wqe(phba, saveq);
3628
3629 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
3630 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3631 "0313 Ring %d handler: unexpected Rctl x%x "
3632 "Type x%x received\n",
3633 pring->ringno, Rctl, Type);
3634
3635 return 1;
3636 }
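
/*
 * Illustrative sketch (editorial addition, not part of the driver): the
 * vpi-to-vport resolution used at the end of lpfc_sli_process_unsol_iocb().
 * A vpi of 0xffff means the frame was not tagged with a virtual port and
 * belongs to the physical port. The helper name is hypothetical.
 */
static __maybe_unused struct lpfc_vport *
lpfc_unsol_resolve_vport_example(struct lpfc_hba *phba, uint16_t vpi)
{
	if (vpi == 0xffff)
		return phba->pport;	/* untagged: physical port */
	return lpfc_find_vport_by_vpid(phba, vpi);
}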
3637
3638 /**
3639 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
3640 * @phba: Pointer to HBA context object.
3641 * @pring: Pointer to driver SLI ring object.
3642 * @prspiocb: Pointer to response iocb object.
3643 *
3644 * This function looks up the iocb_lookup table to get the command iocb
3645 * corresponding to the given response iocb using the iotag of the
3646 * response iocb. The driver calls this function with the hbalock held
3647 * for SLI3 ports or the ring lock held for SLI4 ports.
3648 * This function returns the command iocb object if it finds the command
3649 * iocb else returns NULL.
3650 **/
3651 static struct lpfc_iocbq *
3652 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3653 struct lpfc_sli_ring *pring,
3654 struct lpfc_iocbq *prspiocb)
3655 {
3656 struct lpfc_iocbq *cmd_iocb = NULL;
3657 u16 iotag;
3658
3659 if (phba->sli_rev == LPFC_SLI_REV4)
3660 iotag = get_wqe_reqtag(prspiocb);
3661 else
3662 iotag = prspiocb->iocb.ulpIoTag;
3663
3664 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3665 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3666 if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
3667 /* remove from txcmpl queue list */
3668 list_del_init(&cmd_iocb->list);
3669 cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
3670 pring->txcmplq_cnt--;
3671 return cmd_iocb;
3672 }
3673 }
3674
3675 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3676 "0317 iotag x%x is out of "
3677 "range: max iotag x%x\n",
3678 iotag, phba->sli.last_iotag);
3679 return NULL;
3680 }
3681
3682 /**
3683 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3684 * @phba: Pointer to HBA context object.
3685 * @pring: Pointer to driver SLI ring object.
3686 * @iotag: IOCB tag.
3687 *
3688 * This function looks up the iocb_lookup table to get the command iocb
3689 * corresponding to the given iotag. The driver calls this function with
3690 * the ring lock held because this function is an SLI4 port only helper.
3691 * This function returns the command iocb object if it finds the command
3692 * iocb else returns NULL.
3693 **/
3694 static struct lpfc_iocbq *
3695 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3696 struct lpfc_sli_ring *pring, uint16_t iotag)
3697 {
3698 struct lpfc_iocbq *cmd_iocb = NULL;
3699
3700 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3701 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3702 if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
3703 /* remove from txcmpl queue list */
3704 list_del_init(&cmd_iocb->list);
3705 cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
3706 pring->txcmplq_cnt--;
3707 return cmd_iocb;
3708 }
3709 }
3710
3711 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3712 "0372 iotag x%x lookup error: max iotag (x%x) "
3713 "cmd_flag x%x\n",
3714 iotag, phba->sli.last_iotag,
3715 cmd_iocb ? cmd_iocb->cmd_flag : 0xffff);
3716 return NULL;
3717 }
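
/*
 * Illustrative sketch (editorial addition, not part of the driver): the
 * iotag lookup invariant shared by the two helpers above. iocbq_lookup[]
 * is indexed 1..last_iotag, and only entries still flagged
 * LPFC_IO_ON_TXCMPLQ represent outstanding commands. The caller must hold
 * the same lock the lookups above require. The helper name is hypothetical.
 */
static __maybe_unused bool
lpfc_iotag_outstanding_example(struct lpfc_hba *phba, u16 iotag)
{
	struct lpfc_iocbq *cmd_iocb;

	if (iotag == 0 || iotag > phba->sli.last_iotag)
		return false;	/* out of range */

	cmd_iocb = phba->sli.iocbq_lookup[iotag];
	return (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) != 0;
}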
3718
3719 /**
3720 * lpfc_sli_process_sol_iocb - process solicited iocb completion
3721 * @phba: Pointer to HBA context object.
3722 * @pring: Pointer to driver SLI ring object.
3723 * @saveq: Pointer to the response iocb to be processed.
3724 *
3725 * This function is called by the ring event handler for non-fcp
3726 * rings when there is a new response iocb in the response ring.
3727 * The caller is not required to hold any locks. This function
3728 * gets the command iocb associated with the response iocb and
3729 * calls the completion handler for the command iocb. If there
3730 * is no completion handler, the function will free the resources
3731 * associated with command iocb. If the response iocb is for
3732 * an already aborted command iocb, the status of the completion
3733 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3734 * This function always returns 1.
3735 **/
3736 static int
3737 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3738 struct lpfc_iocbq *saveq)
3739 {
3740 struct lpfc_iocbq *cmdiocbp;
3741 unsigned long iflag;
3742 u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag;
3743
3744 if (phba->sli_rev == LPFC_SLI_REV4)
3745 spin_lock_irqsave(&pring->ring_lock, iflag);
3746 else
3747 spin_lock_irqsave(&phba->hbalock, iflag);
3748 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3749 if (phba->sli_rev == LPFC_SLI_REV4)
3750 spin_unlock_irqrestore(&pring->ring_lock, iflag);
3751 else
3752 spin_unlock_irqrestore(&phba->hbalock, iflag);
3753
3754 ulp_command = get_job_cmnd(phba, saveq);
3755 ulp_status = get_job_ulpstatus(phba, saveq);
3756 ulp_word4 = get_job_word4(phba, saveq);
3757 ulp_context = get_job_ulpcontext(phba, saveq);
3758 if (phba->sli_rev == LPFC_SLI_REV4)
3759 iotag = get_wqe_reqtag(saveq);
3760 else
3761 iotag = saveq->iocb.ulpIoTag;
3762
3763 if (cmdiocbp) {
3764 ulp_command = get_job_cmnd(phba, cmdiocbp);
3765 if (cmdiocbp->cmd_cmpl) {
3766 /*
3767 * If an ELS command failed, send an event to the mgmt
3768 * application.
3769 */
3770 if (ulp_status &&
3771 (pring->ringno == LPFC_ELS_RING) &&
3772 (ulp_command == CMD_ELS_REQUEST64_CR))
3773 lpfc_send_els_failure_event(phba,
3774 cmdiocbp, saveq);
3775
3776 /*
3777 * Post all ELS completions to the worker thread.
3778 * All others are passed to the completion callback.
3779 */
3780 if (pring->ringno == LPFC_ELS_RING) {
3781 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3782 (cmdiocbp->cmd_flag &
3783 LPFC_DRIVER_ABORTED)) {
3784 spin_lock_irqsave(&phba->hbalock,
3785 iflag);
3786 cmdiocbp->cmd_flag &=
3787 ~LPFC_DRIVER_ABORTED;
3788 spin_unlock_irqrestore(&phba->hbalock,
3789 iflag);
3790 saveq->iocb.ulpStatus =
3791 IOSTAT_LOCAL_REJECT;
3792 saveq->iocb.un.ulpWord[4] =
3793 IOERR_SLI_ABORTED;
3794
3795 /* Firmware could still be in progress
3796 * of DMAing payload, so don't free data
3797 * buffer till after a hbeat.
3798 */
3799 spin_lock_irqsave(&phba->hbalock,
3800 iflag);
3801 saveq->cmd_flag |= LPFC_DELAY_MEM_FREE;
3802 spin_unlock_irqrestore(&phba->hbalock,
3803 iflag);
3804 }
3805 if (phba->sli_rev == LPFC_SLI_REV4) {
3806 if (saveq->cmd_flag &
3807 LPFC_EXCHANGE_BUSY) {
3808 /* Set cmdiocb flag for the
3809 * exchange busy so sgl (xri)
3810 * will not be released until
3811 * the abort xri is received
3812 * from hba.
3813 */
3814 spin_lock_irqsave(
3815 &phba->hbalock, iflag);
3816 cmdiocbp->cmd_flag |=
3817 LPFC_EXCHANGE_BUSY;
3818 spin_unlock_irqrestore(
3819 &phba->hbalock, iflag);
3820 }
3821 if (cmdiocbp->cmd_flag &
3822 LPFC_DRIVER_ABORTED) {
3823 /*
3824 * Clear LPFC_DRIVER_ABORTED
3825 * bit in case it was driver
3826 * initiated abort.
3827 */
3828 spin_lock_irqsave(
3829 &phba->hbalock, iflag);
3830 cmdiocbp->cmd_flag &=
3831 ~LPFC_DRIVER_ABORTED;
3832 spin_unlock_irqrestore(
3833 &phba->hbalock, iflag);
3834 set_job_ulpstatus(cmdiocbp,
3835 IOSTAT_LOCAL_REJECT);
3836 set_job_ulpword4(cmdiocbp,
3837 IOERR_ABORT_REQUESTED);
3838 /*
3839 * For SLI4, irspiocb contains
3840 * NO_XRI in sli_xritag, it
3841 * shall not affect releasing
3842 * sgl (xri) process.
3843 */
3844 set_job_ulpstatus(saveq,
3845 IOSTAT_LOCAL_REJECT);
3846 set_job_ulpword4(saveq,
3847 IOERR_SLI_ABORTED);
3848 spin_lock_irqsave(
3849 &phba->hbalock, iflag);
3850 saveq->cmd_flag |=
3851 LPFC_DELAY_MEM_FREE;
3852 spin_unlock_irqrestore(
3853 &phba->hbalock, iflag);
3854 }
3855 }
3856 }
3857 cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq);
3858 } else
3859 lpfc_sli_release_iocbq(phba, cmdiocbp);
3860 } else {
3861 /*
3862 * Unknown initiating command based on the response iotag.
3863 * This could be the case on the ELS ring because of
3864 * lpfc_els_abort().
3865 */
3866 if (pring->ringno != LPFC_ELS_RING) {
3867 /*
3868 * Ring <ringno> handler: unexpected completion IoTag
3869 * <IoTag>
3870 */
3871 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3872 "0322 Ring %d handler: "
3873 "unexpected completion IoTag x%x "
3874 "Data: x%x x%x x%x x%x\n",
3875 pring->ringno, iotag, ulp_status,
3876 ulp_word4, ulp_command, ulp_context);
3877 }
3878 }
3879
3880 return 1;
3881 }
3882
3883 /**
3884 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3885 * @phba: Pointer to HBA context object.
3886 * @pring: Pointer to driver SLI ring object.
3887 *
3888 * This function is called from the iocb ring event handlers when the
3889 * put pointer is ahead of the get pointer for a ring. This function signals
3890 * an error attention condition to the worker thread and the worker
3891 * thread will transition the HBA to offline state.
3892 **/
3893 static void
3894 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3895 {
3896 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3897 /*
3898 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3899 * rsp ring <portRspMax>
3900 */
3901 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3902 "0312 Ring %d handler: portRspPut %d "
3903 "is bigger than rsp ring %d\n",
3904 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3905 pring->sli.sli3.numRiocb);
3906
3907 phba->link_state = LPFC_HBA_ERROR;
3908
3909 /*
3910 * All error attention handlers are posted to
3911 * worker thread
3912 */
3913 phba->work_ha |= HA_ERATT;
3914 phba->work_hs = HS_FFER3;
3915
3916 lpfc_worker_wake_up(phba);
3917
3918 return;
3919 }
3920
3921 /**
3922 * lpfc_poll_eratt - Error attention polling timer timeout handler
3923 * @t: Context to fetch pointer to address of HBA context object from.
3924 *
3925 * This function is invoked by the Error Attention polling timer when the
3926 * timer times out. It will check the SLI Error Attention register for
3927 * possible attention events. If so, it will post an Error Attention event
3928 * and wake up worker thread to process it. Otherwise, it will set up the
3929 * Error Attention polling timer for the next poll.
3930 **/
3931 void lpfc_poll_eratt(struct timer_list *t)
3932 {
3933 struct lpfc_hba *phba;
3934 uint32_t eratt = 0;
3935 uint64_t sli_intr, cnt;
3936
3937 phba = from_timer(phba, t, eratt_poll);
3938 if (!(phba->hba_flag & HBA_SETUP))
3939 return;
3940
3941 /* Here we will also keep track of interrupts per sec of the hba */
3942 sli_intr = phba->sli.slistat.sli_intr;
3943
3944 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3945 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3946 sli_intr);
3947 else
3948 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3949
3950 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3951 do_div(cnt, phba->eratt_poll_interval);
3952 phba->sli.slistat.sli_ips = cnt;
3953
3954 phba->sli.slistat.sli_prev_intr = sli_intr;
3955
3956 /* Check chip HA register for error event */
3957 eratt = lpfc_sli_check_eratt(phba);
3958
3959 if (eratt)
3960 /* Tell the worker thread there is work to do */
3961 lpfc_worker_wake_up(phba);
3962 else
3963 /* Restart the timer for next eratt poll */
3964 mod_timer(&phba->eratt_poll,
3965 jiffies +
3966 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3967 return;
3968 }
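
/*
 * Illustrative sketch (editorial addition, not part of the driver): the
 * interrupts-per-interval computation from lpfc_poll_eratt() in isolation.
 * The counter may wrap, so the delta is computed around the wrap before
 * do_div(), which is required because plain 64-bit division is unavailable
 * on 32-bit x86. interval must be non-zero. The helper name is
 * hypothetical.
 */
static __maybe_unused uint64_t
lpfc_intr_rate_example(uint64_t prev, uint64_t now, uint32_t interval)
{
	uint64_t cnt;

	if (prev > now)
		cnt = ((uint64_t)(-1) - prev) + now;	/* counter wrapped */
	else
		cnt = now - prev;

	do_div(cnt, interval);	/* cnt /= interval, remainder discarded */
	return cnt;
}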
3969
3970
3971 /**
3972 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3973 * @phba: Pointer to HBA context object.
3974 * @pring: Pointer to driver SLI ring object.
3975 * @mask: Host attention register mask for this ring.
3976 *
3977 * This function is called from the interrupt context when there is a ring
3978 * event for the fcp ring. The caller does not hold any lock.
3979 * The function processes each response iocb in the response ring until it
3980 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
3981 * LE bit set. The function will call the completion handler of the command iocb
3982 * if the response iocb indicates a completion for a command iocb or it is
3983 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3984 * function if this is an unsolicited iocb.
3985 * This routine presumes LPFC_FCP_RING handling and doesn't bother
3986 * to check it explicitly.
3987 */
3988 int
3989 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3990 struct lpfc_sli_ring *pring, uint32_t mask)
3991 {
3992 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3993 IOCB_t *irsp = NULL;
3994 IOCB_t *entry = NULL;
3995 struct lpfc_iocbq *cmdiocbq = NULL;
3996 struct lpfc_iocbq rspiocbq;
3997 uint32_t status;
3998 uint32_t portRspPut, portRspMax;
3999 int rc = 1;
4000 lpfc_iocb_type type;
4001 unsigned long iflag;
4002 uint32_t rsp_cmpl = 0;
4003
4004 spin_lock_irqsave(&phba->hbalock, iflag);
4005 pring->stats.iocb_event++;
4006
4007 /*
4008 * The next available response entry should never exceed the maximum
4009 * entries. If it does, treat it as an adapter hardware error.
4010 */
4011 portRspMax = pring->sli.sli3.numRiocb;
4012 portRspPut = le32_to_cpu(pgp->rspPutInx);
4013 if (unlikely(portRspPut >= portRspMax)) {
4014 lpfc_sli_rsp_pointers_error(phba, pring);
4015 spin_unlock_irqrestore(&phba->hbalock, iflag);
4016 return 1;
4017 }
4018 if (phba->fcp_ring_in_use) {
4019 spin_unlock_irqrestore(&phba->hbalock, iflag);
4020 return 1;
4021 } else
4022 phba->fcp_ring_in_use = 1;
4023
4024 rmb();
4025 while (pring->sli.sli3.rspidx != portRspPut) {
4026 /*
4027 * Fetch an entry off the ring and copy it into a local data
4028 * structure. The copy involves a byte-swap since the
4029 * network byte order and pci byte orders are different.
4030 */
4031 entry = lpfc_resp_iocb(phba, pring);
4032 phba->last_completion_time = jiffies;
4033
4034 if (++pring->sli.sli3.rspidx >= portRspMax)
4035 pring->sli.sli3.rspidx = 0;
4036
4037 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
4038 (uint32_t *) &rspiocbq.iocb,
4039 phba->iocb_rsp_size);
4040 INIT_LIST_HEAD(&(rspiocbq.list));
4041 irsp = &rspiocbq.iocb;
4042
4043 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
4044 pring->stats.iocb_rsp++;
4045 rsp_cmpl++;
4046
4047 if (unlikely(irsp->ulpStatus)) {
4048 /*
4049 * If resource errors reported from HBA, reduce
4050 * queuedepths of the SCSI device.
4051 */
4052 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
4053 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
4054 IOERR_NO_RESOURCES)) {
4055 spin_unlock_irqrestore(&phba->hbalock, iflag);
4056 phba->lpfc_rampdown_queue_depth(phba);
4057 spin_lock_irqsave(&phba->hbalock, iflag);
4058 }
4059
4060 /* Rsp ring <ringno> error: IOCB */
4061 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4062 "0336 Rsp Ring %d error: IOCB Data: "
4063 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
4064 pring->ringno,
4065 irsp->un.ulpWord[0],
4066 irsp->un.ulpWord[1],
4067 irsp->un.ulpWord[2],
4068 irsp->un.ulpWord[3],
4069 irsp->un.ulpWord[4],
4070 irsp->un.ulpWord[5],
4071 *(uint32_t *)&irsp->un1,
4072 *((uint32_t *)&irsp->un1 + 1));
4073 }
4074
4075 switch (type) {
4076 case LPFC_ABORT_IOCB:
4077 case LPFC_SOL_IOCB:
4078 /*
4079 * Idle exchange closed via ABTS from port. No iocb
4080 * resources need to be recovered.
4081 */
4082 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
4083 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4084 "0333 IOCB cmd 0x%x"
4085 " processed. Skipping"
4086 " completion\n",
4087 irsp->ulpCommand);
4088 break;
4089 }
4090
4091 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
4092 &rspiocbq);
4093 if (unlikely(!cmdiocbq))
4094 break;
4095 if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED)
4096 cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
4097 if (cmdiocbq->cmd_cmpl) {
4098 spin_unlock_irqrestore(&phba->hbalock, iflag);
4099 cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq);
4100 spin_lock_irqsave(&phba->hbalock, iflag);
4101 }
4102 break;
4103 case LPFC_UNSOL_IOCB:
4104 spin_unlock_irqrestore(&phba->hbalock, iflag);
4105 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
4106 spin_lock_irqsave(&phba->hbalock, iflag);
4107 break;
4108 default:
4109 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
4110 char adaptermsg[LPFC_MAX_ADPTMSG];
4111 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4112 memcpy(&adaptermsg[0], (uint8_t *) irsp,
4113 MAX_MSG_DATA);
4114 dev_warn(&((phba->pcidev)->dev),
4115 "lpfc%d: %s\n",
4116 phba->brd_no, adaptermsg);
4117 } else {
4118 /* Unknown IOCB command */
4119 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4120 "0334 Unknown IOCB command "
4121 "Data: x%x, x%x x%x x%x x%x\n",
4122 type, irsp->ulpCommand,
4123 irsp->ulpStatus,
4124 irsp->ulpIoTag,
4125 irsp->ulpContext);
4126 }
4127 break;
4128 }
4129
4130 /*
4131 * The response IOCB has been processed. Update the ring
4132 * pointer in SLIM. If the port response put pointer has not
4133 * been updated, sync the pgp->rspPutInx and fetch the new port
4134 * response put pointer.
4135 */
4136 writel(pring->sli.sli3.rspidx,
4137 &phba->host_gp[pring->ringno].rspGetInx);
4138
4139 if (pring->sli.sli3.rspidx == portRspPut)
4140 portRspPut = le32_to_cpu(pgp->rspPutInx);
4141 }
4142
4143 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
4144 pring->stats.iocb_rsp_full++;
4145 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4146 writel(status, phba->CAregaddr);
4147 readl(phba->CAregaddr);
4148 }
4149 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4150 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4151 pring->stats.iocb_cmd_empty++;
4152
4153 /* Force update of the local copy of cmdGetInx */
4154 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4155 lpfc_sli_resume_iocb(phba, pring);
4156
4157 if ((pring->lpfc_sli_cmd_available))
4158 (pring->lpfc_sli_cmd_available) (phba, pring);
4159
4160 }
4161
4162 phba->fcp_ring_in_use = 0;
4163 spin_unlock_irqrestore(&phba->hbalock, iflag);
4164 return rc;
4165 }
4166
4167 /**
4168 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
4169 * @phba: Pointer to HBA context object.
4170 * @pring: Pointer to driver SLI ring object.
4171 * @rspiocbp: Pointer to driver response IOCB object.
4172 *
4173 * This function is called from the worker thread when there is a slow-path
4174 * response IOCB to process. This function chains all the response iocbs until
4175 * seeing the iocb with the LE bit set. The function will call
4176 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
4177 * completion of a command iocb. The function will call the
4178 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
4179 * The function frees the resources or calls the completion handler if this
4180 * iocb is an abort completion. The function returns NULL when the response
4181 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
4182 * this function shall chain the iocb on to the iocb_continueq and return the
4183 * response iocb passed in.
4184 **/
4185 static struct lpfc_iocbq *
4186 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4187 struct lpfc_iocbq *rspiocbp)
4188 {
4189 struct lpfc_iocbq *saveq;
4190 struct lpfc_iocbq *cmdiocb;
4191 struct lpfc_iocbq *next_iocb;
4192 IOCB_t *irsp;
4193 uint32_t free_saveq;
4194 u8 cmd_type;
4195 lpfc_iocb_type type;
4196 unsigned long iflag;
4197 u32 ulp_status = get_job_ulpstatus(phba, rspiocbp);
4198 u32 ulp_word4 = get_job_word4(phba, rspiocbp);
4199 u32 ulp_command = get_job_cmnd(phba, rspiocbp);
4200 int rc;
4201
4202 spin_lock_irqsave(&phba->hbalock, iflag);
4203 /* First add the response iocb to the iocb_continueq list */
4204 list_add_tail(&rspiocbp->list, &pring->iocb_continueq);
4205 pring->iocb_continueq_cnt++;
4206
4207 /*
4208 * By default, the driver expects to free all resources
4209 * associated with this iocb completion.
4210 */
4211 free_saveq = 1;
4212 saveq = list_get_first(&pring->iocb_continueq,
4213 struct lpfc_iocbq, list);
4214 list_del_init(&pring->iocb_continueq);
4215 pring->iocb_continueq_cnt = 0;
4216
4217 pring->stats.iocb_rsp++;
4218
4219 /*
4220 * If resource errors reported from HBA, reduce
4221 * queuedepths of the SCSI device.
4222 */
4223 if (ulp_status == IOSTAT_LOCAL_REJECT &&
4224 ((ulp_word4 & IOERR_PARAM_MASK) ==
4225 IOERR_NO_RESOURCES)) {
4226 spin_unlock_irqrestore(&phba->hbalock, iflag);
4227 phba->lpfc_rampdown_queue_depth(phba);
4228 spin_lock_irqsave(&phba->hbalock, iflag);
4229 }
4230
4231 if (ulp_status) {
4232 /* Rsp ring <ringno> error: IOCB */
4233 if (phba->sli_rev < LPFC_SLI_REV4) {
4234 irsp = &rspiocbp->iocb;
4235 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4236 "0328 Rsp Ring %d error: ulp_status x%x "
4237 "IOCB Data: "
4238 "x%08x x%08x x%08x x%08x "
4239 "x%08x x%08x x%08x x%08x "
4240 "x%08x x%08x x%08x x%08x "
4241 "x%08x x%08x x%08x x%08x\n",
4242 pring->ringno, ulp_status,
4243 get_job_ulpword(rspiocbp, 0),
4244 get_job_ulpword(rspiocbp, 1),
4245 get_job_ulpword(rspiocbp, 2),
4246 get_job_ulpword(rspiocbp, 3),
4247 get_job_ulpword(rspiocbp, 4),
4248 get_job_ulpword(rspiocbp, 5),
4249 *(((uint32_t *)irsp) + 6),
4250 *(((uint32_t *)irsp) + 7),
4251 *(((uint32_t *)irsp) + 8),
4252 *(((uint32_t *)irsp) + 9),
4253 *(((uint32_t *)irsp) + 10),
4254 *(((uint32_t *)irsp) + 11),
4255 *(((uint32_t *)irsp) + 12),
4256 *(((uint32_t *)irsp) + 13),
4257 *(((uint32_t *)irsp) + 14),
4258 *(((uint32_t *)irsp) + 15));
4259 } else {
4260 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4261 "0321 Rsp Ring %d error: "
4262 "IOCB Data: "
4263 "x%x x%x x%x x%x\n",
4264 pring->ringno,
4265 rspiocbp->wcqe_cmpl.word0,
4266 rspiocbp->wcqe_cmpl.total_data_placed,
4267 rspiocbp->wcqe_cmpl.parameter,
4268 rspiocbp->wcqe_cmpl.word3);
4269 }
4270 }
4271
4272
4273 /*
4274 * Fetch the iocb command type and call the correct completion
4275 * routine. Solicited and Unsolicited IOCBs on the ELS ring
4276 * get freed back to the lpfc_iocb_list by the discovery
4277 * kernel thread.
4278 */
4279 cmd_type = ulp_command & CMD_IOCB_MASK;
4280 type = lpfc_sli_iocb_cmd_type(cmd_type);
4281 switch (type) {
4282 case LPFC_SOL_IOCB:
4283 spin_unlock_irqrestore(&phba->hbalock, iflag);
4284 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
4285 spin_lock_irqsave(&phba->hbalock, iflag);
4286 break;
4287 case LPFC_UNSOL_IOCB:
4288 spin_unlock_irqrestore(&phba->hbalock, iflag);
4289 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
4290 spin_lock_irqsave(&phba->hbalock, iflag);
4291 if (!rc)
4292 free_saveq = 0;
4293 break;
4294 case LPFC_ABORT_IOCB:
4295 cmdiocb = NULL;
4296 if (ulp_command != CMD_XRI_ABORTED_CX)
4297 cmdiocb = lpfc_sli_iocbq_lookup(phba, pring,
4298 saveq);
4299 if (cmdiocb) {
4300 /* Call the specified completion routine */
4301 if (cmdiocb->cmd_cmpl) {
4302 spin_unlock_irqrestore(&phba->hbalock, iflag);
4303 cmdiocb->cmd_cmpl(phba, cmdiocb, saveq);
4304 spin_lock_irqsave(&phba->hbalock, iflag);
4305 } else {
4306 __lpfc_sli_release_iocbq(phba, cmdiocb);
4307 }
4308 }
4309 break;
4310 case LPFC_UNKNOWN_IOCB:
4311 if (ulp_command == CMD_ADAPTER_MSG) {
4312 char adaptermsg[LPFC_MAX_ADPTMSG];
4313
4314 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4315 memcpy(&adaptermsg[0], (uint8_t *)&rspiocbp->wqe,
4316 MAX_MSG_DATA);
4317 dev_warn(&((phba->pcidev)->dev),
4318 "lpfc%d: %s\n",
4319 phba->brd_no, adaptermsg);
4320 } else {
4321 /* Unknown command */
4322 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4323 "0335 Unknown IOCB "
4324 "command Data: x%x "
4325 "x%x x%x x%x\n",
4326 ulp_command,
4327 ulp_status,
4328 get_wqe_reqtag(rspiocbp),
4329 get_job_ulpcontext(phba, rspiocbp));
4330 }
4331 break;
4332 }
4333
4334 if (free_saveq) {
4335 list_for_each_entry_safe(rspiocbp, next_iocb,
4336 &saveq->list, list) {
4337 list_del_init(&rspiocbp->list);
4338 __lpfc_sli_release_iocbq(phba, rspiocbp);
4339 }
4340 __lpfc_sli_release_iocbq(phba, saveq);
4341 }
4342 rspiocbp = NULL;
4343 spin_unlock_irqrestore(&phba->hbalock, iflag);
4344 return rspiocbp;
4345 }
4346
4347 /**
4348 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
4349 * @phba: Pointer to HBA context object.
4350 * @pring: Pointer to driver SLI ring object.
4351 * @mask: Host attention register mask for this ring.
4352 *
4353 * This routine wraps the actual slow_ring event process routine via the
4354 * API jump table function pointer in the lpfc_hba struct.
4355 **/
4356 void
4357 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
4358 struct lpfc_sli_ring *pring, uint32_t mask)
4359 {
4360 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
4361 }
4362
4363 /**
4364 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
4365 * @phba: Pointer to HBA context object.
4366 * @pring: Pointer to driver SLI ring object.
4367 * @mask: Host attention register mask for this ring.
4368 *
4369 * This function is called from the worker thread when there is a ring event
4370 * for non-fcp rings. The caller does not hold any lock. The function will
4371 * remove each response iocb in the response ring and calls the handle
4372 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4373 **/
4374 static void
4375 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
4376 struct lpfc_sli_ring *pring, uint32_t mask)
4377 {
4378 struct lpfc_pgp *pgp;
4379 IOCB_t *entry;
4380 IOCB_t *irsp = NULL;
4381 struct lpfc_iocbq *rspiocbp = NULL;
4382 uint32_t portRspPut, portRspMax;
4383 unsigned long iflag;
4384 uint32_t status;
4385
4386 pgp = &phba->port_gp[pring->ringno];
4387 spin_lock_irqsave(&phba->hbalock, iflag);
4388 pring->stats.iocb_event++;
4389
4390 /*
4391 * The next available response entry should never exceed the maximum
4392 * entries. If it does, treat it as an adapter hardware error.
4393 */
4394 portRspMax = pring->sli.sli3.numRiocb;
4395 portRspPut = le32_to_cpu(pgp->rspPutInx);
4396 if (portRspPut >= portRspMax) {
4397 /*
4398 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
4399 * rsp ring <portRspMax>
4400 */
4401 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4402 "0303 Ring %d handler: portRspPut %d "
4403 "is bigger than rsp ring %d\n",
4404 pring->ringno, portRspPut, portRspMax);
4405
4406 phba->link_state = LPFC_HBA_ERROR;
4407 spin_unlock_irqrestore(&phba->hbalock, iflag);
4408
4409 phba->work_hs = HS_FFER3;
4410 lpfc_handle_eratt(phba);
4411
4412 return;
4413 }
4414
4415 rmb();
4416 while (pring->sli.sli3.rspidx != portRspPut) {
4417 /*
4418 * Build a completion list and call the appropriate handler.
4419 * The process is to get the next available response iocb, get
4420 * a free iocb from the list, copy the response data into the
4421 * free iocb, insert to the continuation list, and update the
4422 * next response index to slim. This process makes response
4423 * iocbs in the ring available to DMA as fast as possible but
4424 * pays a penalty for a copy operation. Since the iocb is
4425 * only 32 bytes, this penalty is considered small relative to
4426 * the PCI reads for register values and a slim write. When
4427 * the ulpLe field is set, the entire Command has been
4428 * received.
4429 */
4430 entry = lpfc_resp_iocb(phba, pring);
4431
4432 phba->last_completion_time = jiffies;
4433 rspiocbp = __lpfc_sli_get_iocbq(phba);
4434 if (rspiocbp == NULL) {
4435 printk(KERN_ERR "%s: out of buffers! Failing "
4436 "completion.\n", __func__);
4437 break;
4438 }
4439
4440 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
4441 phba->iocb_rsp_size);
4442 irsp = &rspiocbp->iocb;
4443
4444 if (++pring->sli.sli3.rspidx >= portRspMax)
4445 pring->sli.sli3.rspidx = 0;
4446
4447 if (pring->ringno == LPFC_ELS_RING) {
4448 lpfc_debugfs_slow_ring_trc(phba,
4449 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
4450 *(((uint32_t *) irsp) + 4),
4451 *(((uint32_t *) irsp) + 6),
4452 *(((uint32_t *) irsp) + 7));
4453 }
4454
4455 writel(pring->sli.sli3.rspidx,
4456 &phba->host_gp[pring->ringno].rspGetInx);
4457
4458 spin_unlock_irqrestore(&phba->hbalock, iflag);
4459 /* Handle the response IOCB */
4460 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
4461 spin_lock_irqsave(&phba->hbalock, iflag);
4462
4463 /*
4464 * If the port response put pointer has not been updated, sync
4465 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
4466 * response put pointer.
4467 */
4468 if (pring->sli.sli3.rspidx == portRspPut) {
4469 portRspPut = le32_to_cpu(pgp->rspPutInx);
4470 }
4471 } /* while (pring->sli.sli3.rspidx != portRspPut) */
4472
4473 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
4474 /* At least one response entry has been freed */
4475 pring->stats.iocb_rsp_full++;
4476 /* SET RxRE_RSP in Chip Att register */
4477 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4478 writel(status, phba->CAregaddr);
4479 readl(phba->CAregaddr); /* flush */
4480 }
4481 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4482 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4483 pring->stats.iocb_cmd_empty++;
4484
4485 /* Force update of the local copy of cmdGetInx */
4486 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4487 lpfc_sli_resume_iocb(phba, pring);
4488
4489 if ((pring->lpfc_sli_cmd_available))
4490 (pring->lpfc_sli_cmd_available) (phba, pring);
4491
4492 }
4493
4494 spin_unlock_irqrestore(&phba->hbalock, iflag);
4495 return;
4496 }
4497
4498 /**
4499 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
4500 * @phba: Pointer to HBA context object.
4501 * @pring: Pointer to driver SLI ring object.
4502 * @mask: Host attention register mask for this ring.
4503 *
4504 * This function is called from the worker thread when there is a pending
4505 * ELS response iocb on the driver internal slow-path response iocb worker
4506 * queue. The caller does not hold any lock. The function removes each
4507 * response iocb from the response worker queue and calls the handle
4508 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4509 **/
4510 static void
4511 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
4512 struct lpfc_sli_ring *pring, uint32_t mask)
4513 {
4514 struct lpfc_iocbq *irspiocbq;
4515 struct hbq_dmabuf *dmabuf;
4516 struct lpfc_cq_event *cq_event;
4517 unsigned long iflag;
4518 int count = 0;
4519
4520 spin_lock_irqsave(&phba->hbalock, iflag);
4521 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
4522 spin_unlock_irqrestore(&phba->hbalock, iflag);
4523 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4524 /* Get the response iocb from the head of work queue */
4525 spin_lock_irqsave(&phba->hbalock, iflag);
4526 list_remove_head(&phba->sli4_hba.sp_queue_event,
4527 cq_event, struct lpfc_cq_event, list);
4528 spin_unlock_irqrestore(&phba->hbalock, iflag);
4529
4530 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
4531 case CQE_CODE_COMPL_WQE:
4532 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
4533 cq_event);
4534 /* Translate ELS WCQE to response IOCBQ */
4535 irspiocbq = lpfc_sli4_els_preprocess_rspiocbq(phba,
4536 irspiocbq);
4537 if (irspiocbq)
4538 lpfc_sli_sp_handle_rspiocb(phba, pring,
4539 irspiocbq);
4540 count++;
4541 break;
4542 case CQE_CODE_RECEIVE:
4543 case CQE_CODE_RECEIVE_V1:
4544 dmabuf = container_of(cq_event, struct hbq_dmabuf,
4545 cq_event);
4546 lpfc_sli4_handle_received_buffer(phba, dmabuf);
4547 count++;
4548 break;
4549 default:
4550 break;
4551 }
4552
4553 /* Limit the number of events to 64 to avoid soft lockups */
4554 if (count == 64)
4555 break;
4556 }
4557 }
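
/*
 * Illustrative sketch (editorial addition, not part of the driver): the
 * SLI API jump-table pattern behind lpfc_sli_handle_slow_ring_event(). At
 * attach time the driver points a per-revision handler into lpfc_hba;
 * callers then invoke the generic wrapper without caring about the SLI
 * revision. The setup function name is hypothetical.
 */
static __maybe_unused void
lpfc_slow_ring_api_setup_example(struct lpfc_hba *phba)
{
	if (phba->sli_rev < LPFC_SLI_REV4)
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s3;
	else
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s4;
}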
4558
4559 /**
4560 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
4561 * @phba: Pointer to HBA context object.
4562 * @pring: Pointer to driver SLI ring object.
4563 *
4564 * This function aborts all iocbs in the given ring and frees all the iocb
4565 * objects in txq. This function issues an abort iocb for all the iocb commands
4566 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4567 * the return of this function. The caller is not required to hold any locks.
4568 **/
4569 void
4570 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
4571 {
4572 LIST_HEAD(tx_completions);
4573 LIST_HEAD(txcmplq_completions);
4574 struct lpfc_iocbq *iocb, *next_iocb;
4575 int offline;
4576
4577 if (pring->ringno == LPFC_ELS_RING) {
4578 lpfc_fabric_abort_hba(phba);
4579 }
4580 offline = pci_channel_offline(phba->pcidev);
4581
4582 /* Error everything on txq and txcmplq
4583 * First do the txq.
4584 */
4585 if (phba->sli_rev >= LPFC_SLI_REV4) {
4586 spin_lock_irq(&pring->ring_lock);
4587 list_splice_init(&pring->txq, &tx_completions);
4588 pring->txq_cnt = 0;
4589
4590 if (offline) {
4591 list_splice_init(&pring->txcmplq,
4592 &txcmplq_completions);
4593 } else {
4594 /* Next issue ABTS for everything on the txcmplq */
4595 list_for_each_entry_safe(iocb, next_iocb,
4596 &pring->txcmplq, list)
4597 lpfc_sli_issue_abort_iotag(phba, pring,
4598 iocb, NULL);
4599 }
4600 spin_unlock_irq(&pring->ring_lock);
4601 } else {
4602 spin_lock_irq(&phba->hbalock);
4603 list_splice_init(&pring->txq, &tx_completions);
4604 pring->txq_cnt = 0;
4605
4606 if (offline) {
4607 list_splice_init(&pring->txcmplq, &txcmplq_completions);
4608 } else {
4609 /* Next issue ABTS for everything on the txcmplq */
4610 list_for_each_entry_safe(iocb, next_iocb,
4611 &pring->txcmplq, list)
4612 lpfc_sli_issue_abort_iotag(phba, pring,
4613 iocb, NULL);
4614 }
4615 spin_unlock_irq(&phba->hbalock);
4616 }
4617
4618 if (offline) {
4619 /* Cancel all the IOCBs from the completions list */
4620 lpfc_sli_cancel_iocbs(phba, &txcmplq_completions,
4621 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
4622 } else {
4623 /* Make sure HBA is alive */
4624 lpfc_issue_hb_tmo(phba);
4625 }
4626 /* Cancel all the IOCBs from the completions list */
4627 lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT,
4628 IOERR_SLI_ABORTED);
4629 }
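
/*
 * Illustrative sketch (editorial addition, not part of the driver): the
 * splice-then-cancel idiom used by lpfc_sli_abort_iocb_ring(). Entries are
 * moved off the shared txq while the lock is held, then completed with an
 * error status after the lock is dropped, so completion handlers never run
 * under the ring lock. Assumes an SLI4 ring (ring_lock); the helper name
 * is hypothetical.
 */
static __maybe_unused void
lpfc_txq_cancel_example(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	LIST_HEAD(completions);

	spin_lock_irq(&pring->ring_lock);
	list_splice_init(&pring->txq, &completions);
	pring->txq_cnt = 0;
	spin_unlock_irq(&pring->ring_lock);

	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}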
4630
4631 /**
4632 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
4633 * @phba: Pointer to HBA context object.
4634 *
4635 * This function aborts all iocbs in FCP rings and frees all the iocb
4636 * objects in txq. This function issues an abort iocb for all the iocb commands
4637 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4638 * the return of this function. The caller is not required to hold any locks.
4639 **/
4640 void
4641 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4642 {
4643 struct lpfc_sli *psli = &phba->sli;
4644 struct lpfc_sli_ring *pring;
4645 uint32_t i;
4646
4647 /* Look on all the FCP Rings for the iotag */
4648 if (phba->sli_rev >= LPFC_SLI_REV4) {
4649 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4650 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4651 lpfc_sli_abort_iocb_ring(phba, pring);
4652 }
4653 } else {
4654 pring = &psli->sli3_ring[LPFC_FCP_RING];
4655 lpfc_sli_abort_iocb_ring(phba, pring);
4656 }
4657 }
4658
4659 /**
4660 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
4661 * @phba: Pointer to HBA context object.
4662 *
4663 * This function flushes all iocbs in the IO ring and frees all the iocb
4664 * objects in txq and txcmplq. This function will not issue abort iocbs
4665 * for all the iocb commands in txcmplq; they will just be returned with
4666 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
4667 * slot has been permanently disabled.
4668 **/
4669 void
4670 lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4671 {
4672 LIST_HEAD(txq);
4673 LIST_HEAD(txcmplq);
4674 struct lpfc_sli *psli = &phba->sli;
4675 struct lpfc_sli_ring *pring;
4676 uint32_t i;
4677 struct lpfc_iocbq *piocb, *next_iocb;
4678
4679 spin_lock_irq(&phba->hbalock);
4680 /* Indicate the I/O queues are flushed */
4681 phba->hba_flag |= HBA_IOQ_FLUSH;
4682 spin_unlock_irq(&phba->hbalock);
4683
4684 /* Look on all the FCP Rings for the iotag */
4685 if (phba->sli_rev >= LPFC_SLI_REV4) {
4686 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4687 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4688
4689 spin_lock_irq(&pring->ring_lock);
4690 /* Retrieve everything on txq */
4691 list_splice_init(&pring->txq, &txq);
4692 list_for_each_entry_safe(piocb, next_iocb,
4693 &pring->txcmplq, list)
4694 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4695 /* Retrieve everything on the txcmplq */
4696 list_splice_init(&pring->txcmplq, &txcmplq);
4697 pring->txq_cnt = 0;
4698 pring->txcmplq_cnt = 0;
4699 spin_unlock_irq(&pring->ring_lock);
4700
4701 /* Flush the txq */
4702 lpfc_sli_cancel_iocbs(phba, &txq,
4703 IOSTAT_LOCAL_REJECT,
4704 IOERR_SLI_DOWN);
4705 /* Flush the txcmplq */
4706 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4707 IOSTAT_LOCAL_REJECT,
4708 IOERR_SLI_DOWN);
4709 if (unlikely(pci_channel_offline(phba->pcidev)))
4710 lpfc_sli4_io_xri_aborted(phba, NULL, 0);
4711 }
4712 } else {
4713 pring = &psli->sli3_ring[LPFC_FCP_RING];
4714
4715 spin_lock_irq(&phba->hbalock);
4716 /* Retrieve everything on txq */
4717 list_splice_init(&pring->txq, &txq);
4718 list_for_each_entry_safe(piocb, next_iocb,
4719 &pring->txcmplq, list)
4720 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4721 /* Retrieve everything on the txcmplq */
4722 list_splice_init(&pring->txcmplq, &txcmplq);
4723 pring->txq_cnt = 0;
4724 pring->txcmplq_cnt = 0;
4725 spin_unlock_irq(&phba->hbalock);
4726
4727 /* Flush the txq */
4728 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4729 IOERR_SLI_DOWN);
4730 /* Flush the txcmpq */
4731 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4732 IOERR_SLI_DOWN);
4733 }
4734 }
4735
4736 /**
4737 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4738 * @phba: Pointer to HBA context object.
4739 * @mask: Bit mask to be checked.
4740 *
4741 * This function reads the host status register and compares
4742 * with the provided bit mask to check if HBA completed
4743 * the restart. This function will wait in a loop for the
4744 * HBA to complete restart. If the HBA does not restart within
4745 * 15 iterations, the function will reset the HBA again. The
4746 * function returns 1 when the HBA fails to restart; otherwise it
4747 * returns zero.
4748 **/
4749 static int
4750 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4751 {
4752 uint32_t status;
4753 int i = 0;
4754 int retval = 0;
4755
4756 /* Read the HBA Host Status Register */
4757 if (lpfc_readl(phba->HSregaddr, &status))
4758 return 1;
4759
4760 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
4761
4762 /*
4763 * Check the status register every 10ms for 5 retries, then every
4764 * 500ms for 5, then every 2.5 sec for 5, then reset the board and
4765 * check every 2.5 sec for 4 more retries.
4766 * Break out of the loop if errors occurred during init.
4767 */
4768 while (((status & mask) != mask) &&
4769 !(status & HS_FFERM) &&
4770 i++ < 20) {
4771
4772 if (i <= 5)
4773 msleep(10);
4774 else if (i <= 10)
4775 msleep(500);
4776 else
4777 msleep(2500);
4778
4779 if (i == 15) {
4780 /* Do post */
4781 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4782 lpfc_sli_brdrestart(phba);
4783 }
4784 /* Read the HBA Host Status Register */
4785 if (lpfc_readl(phba->HSregaddr, &status)) {
4786 retval = 1;
4787 break;
4788 }
4789 }
4790
4791 /* Check to see if any errors occurred during init */
4792 if ((status & HS_FFERM) || (i >= 20)) {
4793 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4794 "2751 Adapter failed to restart, "
4795 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4796 status,
4797 readl(phba->MBslimaddr + 0xa8),
4798 readl(phba->MBslimaddr + 0xac));
4799 phba->link_state = LPFC_HBA_ERROR;
4800 retval = 1;
4801 }
4802
4803 return retval;
4804 }
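
/*
 * Illustrative sketch (editorial addition, not part of the driver): the
 * staged backoff schedule used by lpfc_sli_brdready_s3() in isolation.
 * Twenty polls are spread as 5 x 10ms, then 5 x 500ms, then 2.5s apiece,
 * with a board restart attempted on the 15th poll. The helper name is
 * hypothetical.
 */
static __maybe_unused unsigned int
lpfc_brdready_delay_ms_example(int i)
{
	if (i <= 5)
		return 10;
	if (i <= 10)
		return 500;
	return 2500;
}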
4805
4806 /**
4807 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4808 * @phba: Pointer to HBA context object.
4809 * @mask: Bit mask to be checked.
4810 *
4811 * This function checks the host status register to determine if the HBA
4812 * is ready. This function will wait in a loop for the HBA to be ready.
4813 * If the HBA is not ready, the function will reset the HBA PCI
4814 * function again. The function returns 1 when the HBA fails to be ready;
4815 * otherwise it returns zero.
4816 **/
4817 static int
4818 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4819 {
4820 uint32_t status;
4821 int retval = 0;
4822
4823 /* Read the HBA Host Status Register */
4824 status = lpfc_sli4_post_status_check(phba);
4825
4826 if (status) {
4827 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4828 lpfc_sli_brdrestart(phba);
4829 status = lpfc_sli4_post_status_check(phba);
4830 }
4831
4832 /* Check to see if any errors occurred during init */
4833 if (status) {
4834 phba->link_state = LPFC_HBA_ERROR;
4835 retval = 1;
4836 } else
4837 phba->sli4_hba.intr_enable = 0;
4838
4839 phba->hba_flag &= ~HBA_SETUP;
4840 return retval;
4841 }
4842
4843 /**
4844 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4845 * @phba: Pointer to HBA context object.
4846 * @mask: Bit mask to be checked.
4847 *
4848 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
4849 * via the API jump table function pointer in the lpfc_hba struct.
4850 **/
4851 int
4852 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4853 {
4854 return phba->lpfc_sli_brdready(phba, mask);
4855 }
4856
4857 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4858
4859 /**
4860 * lpfc_reset_barrier - Make HBA ready for HBA reset
4861 * @phba: Pointer to HBA context object.
4862 *
4863 * This function is called before resetting an HBA. This function is called
4864 * with hbalock held and requests HBA to quiesce DMAs before a reset.
4865 **/
4866 void lpfc_reset_barrier(struct lpfc_hba *phba)
4867 {
4868 uint32_t __iomem *resp_buf;
4869 uint32_t __iomem *mbox_buf;
4870 volatile struct MAILBOX_word0 mbox;
4871 uint32_t hc_copy, ha_copy, resp_data;
4872 int i;
4873 uint8_t hdrtype;
4874
4875 lockdep_assert_held(&phba->hbalock);
4876
4877 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4878 if (hdrtype != 0x80 ||
4879 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4880 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4881 return;
4882
4883 /*
4884 * Tell the other part of the chip to suspend temporarily all
4885 * its DMA activity.
4886 */
4887 resp_buf = phba->MBslimaddr;
4888
4889 /* Disable the error attention */
4890 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4891 return;
4892 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4893 readl(phba->HCregaddr); /* flush */
4894 phba->link_flag |= LS_IGNORE_ERATT;
4895
4896 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4897 return;
4898 if (ha_copy & HA_ERATT) {
4899 /* Clear Chip error bit */
4900 writel(HA_ERATT, phba->HAregaddr);
4901 phba->pport->stopped = 1;
4902 }
4903
4904 mbox.word0 = 0;
4905 mbox.mbxCommand = MBX_KILL_BOARD;
4906 mbox.mbxOwner = OWN_CHIP;
4907
4908 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4909 mbox_buf = phba->MBslimaddr;
4910 writel(mbox.word0, mbox_buf);
4911
4912 for (i = 0; i < 50; i++) {
4913 if (lpfc_readl((resp_buf + 1), &resp_data))
4914 return;
4915 if (resp_data != ~(BARRIER_TEST_PATTERN))
4916 mdelay(1);
4917 else
4918 break;
4919 }
4920 resp_data = 0;
4921 if (lpfc_readl((resp_buf + 1), &resp_data))
4922 return;
4923 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
4924 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4925 phba->pport->stopped)
4926 goto restore_hc;
4927 else
4928 goto clear_errat;
4929 }
4930
4931 mbox.mbxOwner = OWN_HOST;
4932 resp_data = 0;
4933 for (i = 0; i < 500; i++) {
4934 if (lpfc_readl(resp_buf, &resp_data))
4935 return;
4936 if (resp_data != mbox.word0)
4937 mdelay(1);
4938 else
4939 break;
4940 }
4941
4942 clear_errat:
4943
4944 while (++i < 500) {
4945 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4946 return;
4947 if (!(ha_copy & HA_ERATT))
4948 mdelay(1);
4949 else
4950 break;
4951 }
4952
4953 if (readl(phba->HAregaddr) & HA_ERATT) {
4954 writel(HA_ERATT, phba->HAregaddr);
4955 phba->pport->stopped = 1;
4956 }
4957
4958 restore_hc:
4959 phba->link_flag &= ~LS_IGNORE_ERATT;
4960 writel(hc_copy, phba->HCregaddr);
4961 readl(phba->HCregaddr); /* flush */
4962 }
4963
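/*
 * Reading aid for the barrier handshake above (a sketch of the assumed
 * protocol, not additional driver code): the host seeds SLIM word 1 with
 * BARRIER_TEST_PATTERN, posts the OWN_CHIP KILL_BOARD word, and the chip
 * acknowledges the quiesce by writing back the bitwise complement, which
 * the 1 ms polling loops watch for:
 *
 *	writel(BARRIER_TEST_PATTERN, resp_buf + 1);
 *	writel(mbox.word0, mbox_buf);
 *	(poll up to 50 times, 1 ms apart)
 *	if (resp_data == ~BARRIER_TEST_PATTERN)
 *		(chip has suspended its DMA activity)
 */
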
4964 /**
4965 * lpfc_sli_brdkill - Issue a kill_board mailbox command
4966 * @phba: Pointer to HBA context object.
4967 *
4968 * This function issues a kill_board mailbox command and waits for
4969 * the error attention interrupt. This function is called for stopping
4970 * the firmware processing. The caller is not required to hold any
4971 * locks. This function calls lpfc_hba_down_post function to free
4972 * any pending commands after the kill. The function returns 1 if it
4973 * fails to kill the board; otherwise it returns 0.
4974 **/
4975 int
4976 lpfc_sli_brdkill(struct lpfc_hba *phba)
4977 {
4978 struct lpfc_sli *psli;
4979 LPFC_MBOXQ_t *pmb;
4980 uint32_t status;
4981 uint32_t ha_copy;
4982 int retval;
4983 int i = 0;
4984
4985 psli = &phba->sli;
4986
4987 /* Kill HBA */
4988 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4989 "0329 Kill HBA Data: x%x x%x\n",
4990 phba->pport->port_state, psli->sli_flag);
4991
4992 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4993 if (!pmb)
4994 return 1;
4995
4996 /* Disable the error attention */
4997 spin_lock_irq(&phba->hbalock);
4998 if (lpfc_readl(phba->HCregaddr, &status)) {
4999 spin_unlock_irq(&phba->hbalock);
5000 mempool_free(pmb, phba->mbox_mem_pool);
5001 return 1;
5002 }
5003 status &= ~HC_ERINT_ENA;
5004 writel(status, phba->HCregaddr);
5005 readl(phba->HCregaddr); /* flush */
5006 phba->link_flag |= LS_IGNORE_ERATT;
5007 spin_unlock_irq(&phba->hbalock);
5008
5009 lpfc_kill_board(phba, pmb);
5010 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5011 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5012
5013 if (retval != MBX_SUCCESS) {
5014 if (retval != MBX_BUSY)
5015 mempool_free(pmb, phba->mbox_mem_pool);
5016 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5017 "2752 KILL_BOARD command failed retval %d\n",
5018 retval);
5019 spin_lock_irq(&phba->hbalock);
5020 phba->link_flag &= ~LS_IGNORE_ERATT;
5021 spin_unlock_irq(&phba->hbalock);
5022 return 1;
5023 }
5024
5025 spin_lock_irq(&phba->hbalock);
5026 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
5027 spin_unlock_irq(&phba->hbalock);
5028
5029 mempool_free(pmb, phba->mbox_mem_pool);
5030
5031 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
5032 * attention every 100ms for 3 seconds. If we don't get ERATT after
5033 * 3 seconds we still set HBA_ERROR state because the status of the
5034 * board is now undefined.
5035 */
5036 if (lpfc_readl(phba->HAregaddr, &ha_copy))
5037 return 1;
5038 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
5039 mdelay(100);
5040 if (lpfc_readl(phba->HAregaddr, &ha_copy))
5041 return 1;
5042 }
5043
5044 del_timer_sync(&psli->mbox_tmo);
5045 if (ha_copy & HA_ERATT) {
5046 writel(HA_ERATT, phba->HAregaddr);
5047 phba->pport->stopped = 1;
5048 }
5049 spin_lock_irq(&phba->hbalock);
5050 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5051 psli->mbox_active = NULL;
5052 phba->link_flag &= ~LS_IGNORE_ERATT;
5053 spin_unlock_irq(&phba->hbalock);
5054
5055 lpfc_hba_down_post(phba);
5056 phba->link_state = LPFC_HBA_ERROR;
5057
5058 return ha_copy & HA_ERATT ? 0 : 1;
5059 }
5060
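/*
 * Timing sketch for the wait above (arithmetic only): 30 polls at 100 ms
 * each give the 3 second ERATT window mentioned in the comment,
 *
 *	30 * 100ms == 3000ms,
 *
 * after which the board state is treated as undefined and LPFC_HBA_ERROR
 * is set regardless of whether ERATT was seen.
 */
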
5061 /**
5062 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
5063 * @phba: Pointer to HBA context object.
5064 *
5065 * This function resets the HBA by writing HC_INITFF to the control
5066 * register. After the HBA resets, this function resets all the iocb ring
5067 * indices. This function disables PCI layer parity checking during
5068 * the reset.
5069 * This function returns 0 on success, or -EIO if the PCI command
5070 * register cannot be read. The caller is not required to hold any locks.
5071 **/
5072 int
5073 lpfc_sli_brdreset(struct lpfc_hba *phba)
5074 {
5075 struct lpfc_sli *psli;
5076 struct lpfc_sli_ring *pring;
5077 uint16_t cfg_value;
5078 int i;
5079
5080 psli = &phba->sli;
5081
5082 /* Reset HBA */
5083 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5084 "0325 Reset HBA Data: x%x x%x\n",
5085 (phba->pport) ? phba->pport->port_state : 0,
5086 psli->sli_flag);
5087
5088 /* perform board reset */
5089 phba->fc_eventTag = 0;
5090 phba->link_events = 0;
5091 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
5092 if (phba->pport) {
5093 phba->pport->fc_myDID = 0;
5094 phba->pport->fc_prevDID = 0;
5095 }
5096
5097 /* Turn off parity checking and serr during the physical reset */
5098 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
5099 return -EIO;
5100
5101 pci_write_config_word(phba->pcidev, PCI_COMMAND,
5102 (cfg_value &
5103 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5104
5105 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
5106
5107 /* Now toggle INITFF bit in the Host Control Register */
5108 writel(HC_INITFF, phba->HCregaddr);
5109 mdelay(1);
5110 readl(phba->HCregaddr); /* flush */
5111 writel(0, phba->HCregaddr);
5112 readl(phba->HCregaddr); /* flush */
5113
5114 /* Restore PCI cmd register */
5115 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5116
5117 /* Initialize relevant SLI info */
5118 for (i = 0; i < psli->num_rings; i++) {
5119 pring = &psli->sli3_ring[i];
5120 pring->flag = 0;
5121 pring->sli.sli3.rspidx = 0;
5122 pring->sli.sli3.next_cmdidx = 0;
5123 pring->sli.sli3.local_getidx = 0;
5124 pring->sli.sli3.cmdidx = 0;
5125 pring->missbufcnt = 0;
5126 }
5127
5128 phba->link_state = LPFC_WARM_START;
5129 return 0;
5130 }
5131
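/*
 * Generic sketch of the PCI quiesce pattern used above (standard PCI
 * config accessors, with pdev standing in for phba->pcidev): save the
 * command register, mask parity/SERR reporting around the INITFF pulse,
 * then restore the saved value.
 *
 *	u16 cfg;
 *
 *	pci_read_config_word(pdev, PCI_COMMAND, &cfg);
 *	pci_write_config_word(pdev, PCI_COMMAND,
 *			      cfg & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR));
 *	(toggle HC_INITFF in the Host Control Register)
 *	pci_write_config_word(pdev, PCI_COMMAND, cfg);
 */
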
5132 /**
5133 * lpfc_sli4_brdreset - Reset a sli-4 HBA
5134 * @phba: Pointer to HBA context object.
5135 *
5136 * This function resets a SLI4 HBA. It disables PCI layer parity
5137 * checking while it resets the device. The caller is not required to hold
5138 * any locks.
5139 *
5140 * This function returns 0 on success else returns negative error code.
5141 **/
5142 int
5143 lpfc_sli4_brdreset(struct lpfc_hba *phba)
5144 {
5145 struct lpfc_sli *psli = &phba->sli;
5146 uint16_t cfg_value;
5147 int rc = 0;
5148
5149 /* Reset HBA */
5150 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5151 "0295 Reset HBA Data: x%x x%x x%x\n",
5152 phba->pport->port_state, psli->sli_flag,
5153 phba->hba_flag);
5154
5155 /* perform board reset */
5156 phba->fc_eventTag = 0;
5157 phba->link_events = 0;
5158 phba->pport->fc_myDID = 0;
5159 phba->pport->fc_prevDID = 0;
5160 phba->hba_flag &= ~HBA_SETUP;
5161
5162 spin_lock_irq(&phba->hbalock);
5163 psli->sli_flag &= ~(LPFC_PROCESS_LA);
5164 phba->fcf.fcf_flag = 0;
5165 spin_unlock_irq(&phba->hbalock);
5166
5167 /* Now physically reset the device */
5168 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5169 "0389 Performing PCI function reset!\n");
5170
5171 /* Turn off parity checking and serr during the physical reset */
5172 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
5173 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5174 "3205 PCI read Config failed\n");
5175 return -EIO;
5176 }
5177
5178 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
5179 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5180
5181 /* Perform FCoE PCI function reset before freeing queue memory */
5182 rc = lpfc_pci_function_reset(phba);
5183
5184 /* Restore PCI cmd register */
5185 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5186
5187 return rc;
5188 }
5189
5190 /**
5191 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
5192 * @phba: Pointer to HBA context object.
5193 *
5194 * This function is called in the SLI initialization code path to
5195 * restart the HBA. The caller is not required to hold any lock.
5196 * This function writes MBX_RESTART mailbox command to the SLIM and
5197 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
5198 * function to free any pending commands. The function enables
5199 * POST only during the first initialization and returns zero.
5200 * Completion of the MBX_RESTART mailbox command is not guaranteed
5201 * before this function returns.
5202 **/
5203 static int
5204 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
5205 {
5206 volatile struct MAILBOX_word0 mb;
5207 struct lpfc_sli *psli;
5208 void __iomem *to_slim;
5209
5210 spin_lock_irq(&phba->hbalock);
5211
5212 psli = &phba->sli;
5213
5214 /* Restart HBA */
5215 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5216 "0337 Restart HBA Data: x%x x%x\n",
5217 (phba->pport) ? phba->pport->port_state : 0,
5218 psli->sli_flag);
5219
5220 mb.word0 = 0;
5221 mb.mbxCommand = MBX_RESTART;
5222 mb.mbxHc = 1;
5223
5224 lpfc_reset_barrier(phba);
5225
5226 to_slim = phba->MBslimaddr;
5227 writel(mb.word0, to_slim);
5228 readl(to_slim); /* flush */
5229
5230 /* Only skip post after fc_ffinit is completed */
5231 if (phba->pport && phba->pport->port_state)
5232 mb.word0 = 1; /* This is really setting up word1 */
5233 else
5234 mb.word0 = 0; /* This is really setting up word1 */
5235 to_slim = phba->MBslimaddr + sizeof (uint32_t);
5236 writel(mb.word0, to_slim);
5237 readl(to_slim); /* flush */
5238
5239 lpfc_sli_brdreset(phba);
5240 if (phba->pport)
5241 phba->pport->stopped = 0;
5242 phba->link_state = LPFC_INIT_START;
5243 phba->hba_flag = 0;
5244 spin_unlock_irq(&phba->hbalock);
5245
5246 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5247 psli->stats_start = ktime_get_seconds();
5248
5249 /* Give the INITFF and Post time to settle. */
5250 mdelay(100);
5251
5252 lpfc_hba_down_post(phba);
5253
5254 return 0;
5255 }
5256
5257 /**
5258 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
5259 * @phba: Pointer to HBA context object.
5260 *
5261 * This function is called in the SLI initialization code path to restart
5262 * a SLI4 HBA. The caller is not required to hold any lock.
5263 * At the end of the function, it calls lpfc_hba_down_post function to
5264 * free any pending commands.
5265 **/
5266 static int
5267 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
5268 {
5269 struct lpfc_sli *psli = &phba->sli;
5270 int rc;
5271
5272 /* Restart HBA */
5273 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5274 "0296 Restart HBA Data: x%x x%x\n",
5275 phba->pport->port_state, psli->sli_flag);
5276
5277 rc = lpfc_sli4_brdreset(phba);
5278 if (rc) {
5279 phba->link_state = LPFC_HBA_ERROR;
5280 goto hba_down_queue;
5281 }
5282
5283 spin_lock_irq(&phba->hbalock);
5284 phba->pport->stopped = 0;
5285 phba->link_state = LPFC_INIT_START;
5286 phba->hba_flag = 0;
5287 /* Preserve FA-PWWN expectation */
5288 phba->sli4_hba.fawwpn_flag &= LPFC_FAWWPN_FABRIC;
5289 spin_unlock_irq(&phba->hbalock);
5290
5291 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5292 psli->stats_start = ktime_get_seconds();
5293
5294 hba_down_queue:
5295 lpfc_hba_down_post(phba);
5296 lpfc_sli4_queue_destroy(phba);
5297
5298 return rc;
5299 }
5300
5301 /**
5302 * lpfc_sli_brdrestart - Wrapper func for restarting hba
5303 * @phba: Pointer to HBA context object.
5304 *
5305 * This routine wraps the actual SLI3 or SLI4 hba restart routine, invoked
5306 * through the API jump table function pointer in the lpfc_hba struct.
5307 **/
5308 int
5309 lpfc_sli_brdrestart(struct lpfc_hba *phba)
5310 {
5311 return phba->lpfc_sli_brdrestart(phba);
5312 }
5313
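/*
 * Dispatch sketch (an assumed API-setup site elsewhere in the driver, not
 * shown here): the SLI3/SLI4 restart variants are bound to the jump table
 * once during initialization, so this wrapper stays branch-free.
 *
 *	phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;	(SLI3)
 *	phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;	(SLI4)
 */
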
5314 /**
5315 * lpfc_sli_chipset_init - Wait for HBA readiness after a board restart
5316 * @phba: Pointer to HBA context object.
5317 *
5318 * This function is called after a HBA restart to wait for successful
5319 * restart of the HBA. Successful restart of the HBA is indicated by
5320 * HS_FFRDY and HS_MBRDY bits. If the HBA has not restarted by the 150th
5321 * polling iteration, the function restarts the HBA once more. The function
5322 * returns zero if the HBA successfully restarted, else a negative error code.
5323 **/
5324 int
5325 lpfc_sli_chipset_init(struct lpfc_hba *phba)
5326 {
5327 uint32_t status, i = 0;
5328
5329 /* Read the HBA Host Status Register */
5330 if (lpfc_readl(phba->HSregaddr, &status))
5331 return -EIO;
5332
5333 /* Check status register to see what current state is */
5334 i = 0;
5335 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
5336
5337 /* Check every 10ms for 10 retries, then every 100ms for 90
5338 * retries, then every 1 sec for 50 retries, for a total of
5339 * ~60 seconds before resetting the board again, after which
5340 * we check every 1 sec for 50 more retries. Up to 60 seconds
5341 * of board-ready wait is required for Falcon FIPS zeroization
5342 * to complete; any board reset in between restarts the
5343 * zeroization, further delaying board readiness.
5344 */
5345 if (i++ >= 200) {
5346 /* Adapter failed to init, timeout, status reg
5347 <status> */
5348 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5349 "0436 Adapter failed to init, "
5350 "timeout, status reg x%x, "
5351 "FW Data: A8 x%x AC x%x\n", status,
5352 readl(phba->MBslimaddr + 0xa8),
5353 readl(phba->MBslimaddr + 0xac));
5354 phba->link_state = LPFC_HBA_ERROR;
5355 return -ETIMEDOUT;
5356 }
5357
5358 /* Check to see if any errors occurred during init */
5359 if (status & HS_FFERM) {
5360 /* ERROR: During chipset initialization */
5361 /* Adapter failed to init, chipset, status reg
5362 <status> */
5363 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5364 "0437 Adapter failed to init, "
5365 "chipset, status reg x%x, "
5366 "FW Data: A8 x%x AC x%x\n", status,
5367 readl(phba->MBslimaddr + 0xa8),
5368 readl(phba->MBslimaddr + 0xac));
5369 phba->link_state = LPFC_HBA_ERROR;
5370 return -EIO;
5371 }
5372
5373 if (i <= 10)
5374 msleep(10);
5375 else if (i <= 100)
5376 msleep(100);
5377 else
5378 msleep(1000);
5379
5380 if (i == 150) {
5381 /* Do post */
5382 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5383 lpfc_sli_brdrestart(phba);
5384 }
5385 /* Read the HBA Host Status Register */
5386 if (lpfc_readl(phba->HSregaddr, &status))
5387 return -EIO;
5388 }
5389
5390 /* Check to see if any errors occurred during init */
5391 if (status & HS_FFERM) {
5392 /* ERROR: During chipset initialization */
5393 /* Adapter failed to init, chipset, status reg <status> */
5394 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5395 "0438 Adapter failed to init, chipset, "
5396 "status reg x%x, "
5397 "FW Data: A8 x%x AC x%x\n", status,
5398 readl(phba->MBslimaddr + 0xa8),
5399 readl(phba->MBslimaddr + 0xac));
5400 phba->link_state = LPFC_HBA_ERROR;
5401 return -EIO;
5402 }
5403
5404 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
5405
5406 /* Clear all interrupt enable conditions */
5407 writel(0, phba->HCregaddr);
5408 readl(phba->HCregaddr); /* flush */
5409
5410 /* setup host attn register */
5411 writel(0xffffffff, phba->HAregaddr);
5412 readl(phba->HAregaddr); /* flush */
5413 return 0;
5414 }
5415
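/*
 * Worked timing for the polling loop above: iterations 1-10 sleep 10 ms,
 * 11-100 sleep 100 ms, and 101-200 sleep 1 s, so the mid-loop restart at
 * i == 150 fires after roughly
 *
 *	10 * 10ms + 90 * 100ms + 50 * 1000ms  ~=  59 seconds,
 *
 * covering the ~60 s FIPS zeroization window, with 50 more one-second
 * retries before the loop times out at i >= 200.
 */
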
5416 /**
5417 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
5418 *
5419 * This function calculates and returns the number of HBQs required to be
5420 * configured.
5421 **/
5422 int
5423 lpfc_sli_hbq_count(void)
5424 {
5425 return ARRAY_SIZE(lpfc_hbq_defs);
5426 }
5427
5428 /**
5429 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
5430 *
5431 * This function adds the number of hbq entries in every HBQ to get
5432 * the total number of hbq entries required for the HBA and returns
5433 * the total count.
5434 **/
5435 static int
5436 lpfc_sli_hbq_entry_count(void)
5437 {
5438 int hbq_count = lpfc_sli_hbq_count();
5439 int count = 0;
5440 int i;
5441
5442 for (i = 0; i < hbq_count; ++i)
5443 count += lpfc_hbq_defs[i]->entry_count;
5444 return count;
5445 }
5446
5447 /**
5448 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
5449 *
5450 * This function calculates amount of memory required for all hbq entries
5451 * to be configured and returns the total memory required.
5452 **/
5453 int
5454 lpfc_sli_hbq_size(void)
5455 {
5456 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
5457 }
5458
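/*
 * Worked example (hypothetical entry counts, for illustration only): with
 * two HBQ definitions of 256 and 128 entries, lpfc_sli_hbq_entry_count()
 * returns 384 and lpfc_sli_hbq_size() returns
 *
 *	384 * sizeof(struct lpfc_hbq_entry)
 *
 * bytes, which a caller can use to size the DMA region backing all HBQs.
 */
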
5459 /**
5460 * lpfc_sli_hbq_setup - configure and initialize HBQs
5461 * @phba: Pointer to HBA context object.
5462 *
5463 * This function is called during the SLI initialization to configure
5464 * all the HBQs and post buffers to the HBQ. The caller is not
5465 * required to hold any locks. This function will return zero if successful
5466 * else it will return negative error code.
5467 **/
5468 static int
5469 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
5470 {
5471 int hbq_count = lpfc_sli_hbq_count();
5472 LPFC_MBOXQ_t *pmb;
5473 MAILBOX_t *pmbox;
5474 uint32_t hbqno;
5475 uint32_t hbq_entry_index;
5476
5477 /* Get a Mailbox buffer to setup mailbox
5478 * commands for HBA initialization
5479 */
5480 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5481
5482 if (!pmb)
5483 return -ENOMEM;
5484
5485 pmbox = &pmb->u.mb;
5486
5487 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
5488 phba->link_state = LPFC_INIT_MBX_CMDS;
5489 phba->hbq_in_use = 1;
5490
5491 hbq_entry_index = 0;
5492 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
5493 phba->hbqs[hbqno].next_hbqPutIdx = 0;
5494 phba->hbqs[hbqno].hbqPutIdx = 0;
5495 phba->hbqs[hbqno].local_hbqGetIdx = 0;
5496 phba->hbqs[hbqno].entry_count =
5497 lpfc_hbq_defs[hbqno]->entry_count;
5498 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
5499 hbq_entry_index, pmb);
5500 hbq_entry_index += phba->hbqs[hbqno].entry_count;
5501
5502 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
5503 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
5504 mbxStatus <status>, ring <num> */
5505
5506 lpfc_printf_log(phba, KERN_ERR,
5507 LOG_SLI | LOG_VPORT,
5508 "1805 Adapter failed to init. "
5509 "Data: x%x x%x x%x\n",
5510 pmbox->mbxCommand,
5511 pmbox->mbxStatus, hbqno);
5512
5513 phba->link_state = LPFC_HBA_ERROR;
5514 mempool_free(pmb, phba->mbox_mem_pool);
5515 return -ENXIO;
5516 }
5517 }
5518 phba->hbq_count = hbq_count;
5519
5520 mempool_free(pmb, phba->mbox_mem_pool);
5521
5522 /* Initially populate or replenish the HBQs */
5523 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
5524 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
5525 return 0;
5526 }
5527
5528 /**
5529 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
5530 * @phba: Pointer to HBA context object.
5531 *
5532 * This function is called during the SLI initialization to configure
5533 * the ELS HBQ and post receive buffers to it. The caller is not
5534 * required to hold any locks. This function will return zero if successful
5535 * else it will return negative error code.
5536 **/
5537 static int
5538 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
5539 {
5540 phba->hbq_in_use = 1;
5541 /*
5542 * Specific case when MDS diagnostics are enabled and supported.
5543 * The receive buffer count is truncated to manage the incoming
5544 * traffic.
5545 */
5546 if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
5547 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5548 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
5549 else
5550 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5551 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
5552 phba->hbq_count = 1;
5553 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
5554 /* Initially populate or replenish the HBQs */
5555 return 0;
5556 }
5557
5558 /**
5559 * lpfc_sli_config_port - Issue config port mailbox command
5560 * @phba: Pointer to HBA context object.
5561 * @sli_mode: sli mode - 2/3
5562 *
5563 * This function is called by the SLI initialization code path
5564 * to issue the config_port mailbox command. This function restarts the
5565 * HBA firmware and issues a config_port mailbox command to configure
5566 * the SLI interface in the SLI mode specified by the sli_mode
5567 * argument. The caller is not required to hold any locks.
5568 * The function returns 0 if successful, else returns negative error
5569 * code.
5570 **/
5571 int
5572 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
5573 {
5574 LPFC_MBOXQ_t *pmb;
5575 uint32_t resetcount = 0, rc = 0, done = 0;
5576
5577 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5578 if (!pmb) {
5579 phba->link_state = LPFC_HBA_ERROR;
5580 return -ENOMEM;
5581 }
5582
5583 phba->sli_rev = sli_mode;
5584 while (resetcount < 2 && !done) {
5585 spin_lock_irq(&phba->hbalock);
5586 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5587 spin_unlock_irq(&phba->hbalock);
5588 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5589 lpfc_sli_brdrestart(phba);
5590 rc = lpfc_sli_chipset_init(phba);
5591 if (rc)
5592 break;
5593
5594 spin_lock_irq(&phba->hbalock);
5595 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5596 spin_unlock_irq(&phba->hbalock);
5597 resetcount++;
5598
5599 /* Call pre CONFIG_PORT mailbox command initialization. A
5600 * value of 0 means the call was successful. Any nonzero
5601 * value is a failure, but if ERESTART is returned,
5602 * the driver may reset the HBA and try again.
5603 */
5604 rc = lpfc_config_port_prep(phba);
5605 if (rc == -ERESTART) {
5606 phba->link_state = LPFC_LINK_UNKNOWN;
5607 continue;
5608 } else if (rc)
5609 break;
5610
5611 phba->link_state = LPFC_INIT_MBX_CMDS;
5612 lpfc_config_port(phba, pmb);
5613 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5614 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5615 LPFC_SLI3_HBQ_ENABLED |
5616 LPFC_SLI3_CRP_ENABLED |
5617 LPFC_SLI3_DSS_ENABLED);
5618 if (rc != MBX_SUCCESS) {
5619 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5620 "0442 Adapter failed to init, mbxCmd x%x "
5621 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5622 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5623 spin_lock_irq(&phba->hbalock);
5624 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5625 spin_unlock_irq(&phba->hbalock);
5626 rc = -ENXIO;
5627 } else {
5628 /* Allow asynchronous mailbox command to go through */
5629 spin_lock_irq(&phba->hbalock);
5630 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5631 spin_unlock_irq(&phba->hbalock);
5632 done = 1;
5633
5634 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5635 (pmb->u.mb.un.varCfgPort.gasabt == 0))
5636 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5637 "3110 Port did not grant ASABT\n");
5638 }
5639 }
5640 if (!done) {
5641 rc = -EINVAL;
5642 goto do_prep_failed;
5643 }
5644 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5645 if (!pmb->u.mb.un.varCfgPort.cMA) {
5646 rc = -ENXIO;
5647 goto do_prep_failed;
5648 }
5649 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5650 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5651 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5652 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5653 phba->max_vpi : phba->max_vports;
5654
5655 } else
5656 phba->max_vpi = 0;
5657 if (pmb->u.mb.un.varCfgPort.gerbm)
5658 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5659 if (pmb->u.mb.un.varCfgPort.gcrp)
5660 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5661
5662 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5663 phba->port_gp = phba->mbox->us.s3_pgp.port;
5664
5665 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5666 if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5667 phba->cfg_enable_bg = 0;
5668 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5669 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5670 "0443 Adapter did not grant "
5671 "BlockGuard\n");
5672 }
5673 }
5674 } else {
5675 phba->hbq_get = NULL;
5676 phba->port_gp = phba->mbox->us.s2.port;
5677 phba->max_vpi = 0;
5678 }
5679 do_prep_failed:
5680 mempool_free(pmb, phba->mbox_mem_pool);
5681 return rc;
5682 }
5683
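/*
 * Usage sketch (mirrors lpfc_sli_hba_setup below): the caller selects the
 * target SLI mode and treats any nonzero return as fatal for this
 * initialization attempt.
 *
 *	rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
 *	if (rc)
 *		return -EIO;
 */
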
5684
5685 /**
5686 * lpfc_sli_hba_setup - SLI initialization function
5687 * @phba: Pointer to HBA context object.
5688 *
5689 * This function is the main SLI initialization function. This function
5690 * is called by the HBA initialization code, HBA reset code and HBA
5691 * error attention handler code. Caller is not required to hold any
5692 * locks. This function issues the config_port mailbox command to configure
5693 * the SLI, set up the iocb rings and HBQ rings. In the end the function
5694 * calls the config_port_post function to issue init_link mailbox
5695 * command and to start the discovery. The function will return zero
5696 * if successful, else it will return negative error code.
5697 **/
5698 int
5699 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5700 {
5701 uint32_t rc;
5702 int i;
5703 int longs;
5704
5705 /* Enable ISR already does config_port because of config_msi mbx */
5706 if (phba->hba_flag & HBA_NEEDS_CFG_PORT) {
5707 rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
5708 if (rc)
5709 return -EIO;
5710 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
5711 }
5712 phba->fcp_embed_io = 0; /* SLI4 FC support only */
5713
5714 if (phba->sli_rev == 3) {
5715 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5716 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5717 } else {
5718 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5719 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5720 phba->sli3_options = 0;
5721 }
5722
5723 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5724 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5725 phba->sli_rev, phba->max_vpi);
5726 rc = lpfc_sli_ring_map(phba);
5727
5728 if (rc)
5729 goto lpfc_sli_hba_setup_error;
5730
5731 /* Initialize VPIs. */
5732 if (phba->sli_rev == LPFC_SLI_REV3) {
5733 /*
5734 * The VPI bitmask and physical ID array are allocated
5735 * and initialized once only - at driver load. A port
5736 * reset doesn't need to reinitialize this memory.
5737 */
5738 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5739 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5740 phba->vpi_bmask = kcalloc(longs,
5741 sizeof(unsigned long),
5742 GFP_KERNEL);
5743 if (!phba->vpi_bmask) {
5744 rc = -ENOMEM;
5745 goto lpfc_sli_hba_setup_error;
5746 }
5747
5748 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5749 sizeof(uint16_t),
5750 GFP_KERNEL);
5751 if (!phba->vpi_ids) {
5752 kfree(phba->vpi_bmask);
5753 rc = -ENOMEM;
5754 goto lpfc_sli_hba_setup_error;
5755 }
5756 for (i = 0; i < phba->max_vpi; i++)
5757 phba->vpi_ids[i] = i;
5758 }
5759 }
5760
5761 /* Init HBQs */
5762 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5763 rc = lpfc_sli_hbq_setup(phba);
5764 if (rc)
5765 goto lpfc_sli_hba_setup_error;
5766 }
5767 spin_lock_irq(&phba->hbalock);
5768 phba->sli.sli_flag |= LPFC_PROCESS_LA;
5769 spin_unlock_irq(&phba->hbalock);
5770
5771 rc = lpfc_config_port_post(phba);
5772 if (rc)
5773 goto lpfc_sli_hba_setup_error;
5774
5775 return rc;
5776
5777 lpfc_sli_hba_setup_error:
5778 phba->link_state = LPFC_HBA_ERROR;
5779 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5780 "0445 Firmware initialization failed\n");
5781 return rc;
5782 }
5783
5784 /**
5785 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5786 * @phba: Pointer to HBA context object.
5787 *
5788 * This function issues a dump mailbox command to read config region
5789 * 23, parses the records in the region, and populates the driver
5790 * data structure.
5791 **/
5792 static int
5793 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5794 {
5795 LPFC_MBOXQ_t *mboxq;
5796 struct lpfc_dmabuf *mp;
5797 struct lpfc_mqe *mqe;
5798 uint32_t data_length;
5799 int rc;
5800
5801 /* Program the default value of vlan_id and fc_map */
5802 phba->valid_vlan = 0;
5803 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5804 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5805 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5806
5807 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5808 if (!mboxq)
5809 return -ENOMEM;
5810
5811 mqe = &mboxq->u.mqe;
5812 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5813 rc = -ENOMEM;
5814 goto out_free_mboxq;
5815 }
5816
5817 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5818 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5819
5820 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5821 "(%d):2571 Mailbox cmd x%x Status x%x "
5822 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5823 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5824 "CQ: x%x x%x x%x x%x\n",
5825 mboxq->vport ? mboxq->vport->vpi : 0,
5826 bf_get(lpfc_mqe_command, mqe),
5827 bf_get(lpfc_mqe_status, mqe),
5828 mqe->un.mb_words[0], mqe->un.mb_words[1],
5829 mqe->un.mb_words[2], mqe->un.mb_words[3],
5830 mqe->un.mb_words[4], mqe->un.mb_words[5],
5831 mqe->un.mb_words[6], mqe->un.mb_words[7],
5832 mqe->un.mb_words[8], mqe->un.mb_words[9],
5833 mqe->un.mb_words[10], mqe->un.mb_words[11],
5834 mqe->un.mb_words[12], mqe->un.mb_words[13],
5835 mqe->un.mb_words[14], mqe->un.mb_words[15],
5836 mqe->un.mb_words[16], mqe->un.mb_words[50],
5837 mboxq->mcqe.word0,
5838 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5839 mboxq->mcqe.trailer);
5840
5841 if (rc) {
5842 rc = -EIO;
5843 goto out_free_mboxq;
5844 }
5845 data_length = mqe->un.mb_words[5];
5846 if (data_length > DMP_RGN23_SIZE) {
5847 rc = -EIO;
5848 goto out_free_mboxq;
5849 }
5850
5851 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5852 rc = 0;
5853
5854 out_free_mboxq:
5855 lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
5856 return rc;
5857 }
5858
5859 /**
5860 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5861 * @phba: pointer to lpfc hba data structure.
5862 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5863 * @vpd: pointer to the memory to hold resulting port vpd data.
5864 * @vpd_size: On input, the number of bytes allocated to @vpd.
5865 * On output, the number of data bytes in @vpd.
5866 *
5867 * This routine executes a READ_REV SLI4 mailbox command. In
5868 * addition, this routine gets the port vpd data.
5869 *
5870 * Return codes
5871 * 0 - successful
5872 * -ENOMEM - could not allocate memory.
5873 **/
5874 static int
5875 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5876 uint8_t *vpd, uint32_t *vpd_size)
5877 {
5878 int rc = 0;
5879 uint32_t dma_size;
5880 struct lpfc_dmabuf *dmabuf;
5881 struct lpfc_mqe *mqe;
5882
5883 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5884 if (!dmabuf)
5885 return -ENOMEM;
5886
5887 /*
5888 * Get a DMA buffer for the vpd data resulting from the READ_REV
5889 * mailbox command.
5890 */
5891 dma_size = *vpd_size;
5892 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5893 &dmabuf->phys, GFP_KERNEL);
5894 if (!dmabuf->virt) {
5895 kfree(dmabuf);
5896 return -ENOMEM;
5897 }
5898
5899 /*
5900 * The SLI4 implementation of READ_REV conflicts at word1,
5901 * bits 31:16 and SLI4 adds vpd functionality not present
5902 * in SLI3. This code corrects the conflicts.
5903 */
5904 lpfc_read_rev(phba, mboxq);
5905 mqe = &mboxq->u.mqe;
5906 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5907 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5908 mqe->un.read_rev.word1 &= 0x0000FFFF;
5909 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5910 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5911
5912 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5913 if (rc) {
5914 dma_free_coherent(&phba->pcidev->dev, dma_size,
5915 dmabuf->virt, dmabuf->phys);
5916 kfree(dmabuf);
5917 return -EIO;
5918 }
5919
5920 /*
5921 * The available vpd length cannot be bigger than the
5922 * DMA buffer passed to the port. Catch the less than
5923 * case and update the caller's size.
5924 */
5925 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5926 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5927
5928 memcpy(vpd, dmabuf->virt, *vpd_size);
5929
5930 dma_free_coherent(&phba->pcidev->dev, dma_size,
5931 dmabuf->virt, dmabuf->phys);
5932 kfree(dmabuf);
5933 return 0;
5934 }
5935
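/*
 * Usage sketch (an assumed caller, illustrating the in/out @vpd_size
 * contract): pass the allocated size in; on return it holds the number of
 * valid VPD bytes, never more than was allocated.
 *
 *	uint32_t vpd_size = 1024;	(caller-chosen allocation size)
 *	uint8_t *vpd = kzalloc(vpd_size, GFP_KERNEL);
 *
 *	if (vpd && !lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size))
 *		(parse vpd_size bytes of VPD data)
 */
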
5936 /**
5937 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5938 * @phba: pointer to lpfc hba data structure.
5939 *
5940 * This routine retrieves the SLI4 device controller attributes for the
5941 * adapter this PCI function is attached to.
5942 *
5943 * Return codes
5944 * 0 - successful
5945 * otherwise - failed to retrieve controller attributes
5946 **/
5947 static int
5948 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5949 {
5950 LPFC_MBOXQ_t *mboxq;
5951 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5952 struct lpfc_controller_attribute *cntl_attr;
5953 void *virtaddr = NULL;
5954 uint32_t alloclen, reqlen;
5955 uint32_t shdr_status, shdr_add_status;
5956 union lpfc_sli4_cfg_shdr *shdr;
5957 int rc;
5958
5959 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5960 if (!mboxq)
5961 return -ENOMEM;
5962
5963 /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5964 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5965 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5966 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5967 LPFC_SLI4_MBX_NEMBED);
5968
5969 if (alloclen < reqlen) {
5970 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5971 "3084 Allocated DMA memory size (%d) is "
5972 "less than the requested DMA memory size "
5973 "(%d)\n", alloclen, reqlen);
5974 rc = -ENOMEM;
5975 goto out_free_mboxq;
5976 }
5977 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5978 virtaddr = mboxq->sge_array->addr[0];
5979 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5980 shdr = &mbx_cntl_attr->cfg_shdr;
5981 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5982 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5983 if (shdr_status || shdr_add_status || rc) {
5984 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5985 "3085 Mailbox x%x (x%x/x%x) failed, "
5986 "rc:x%x, status:x%x, add_status:x%x\n",
5987 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5988 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5989 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5990 rc, shdr_status, shdr_add_status);
5991 rc = -ENXIO;
5992 goto out_free_mboxq;
5993 }
5994
5995 cntl_attr = &mbx_cntl_attr->cntl_attr;
5996 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5997 phba->sli4_hba.lnk_info.lnk_tp =
5998 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5999 phba->sli4_hba.lnk_info.lnk_no =
6000 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
6001 phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
6002 phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
6003
6004 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
6005 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
6006 sizeof(phba->BIOSVersion));
6007
6008 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6009 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
6010 "flash_id: x%02x, asic_rev: x%02x\n",
6011 phba->sli4_hba.lnk_info.lnk_tp,
6012 phba->sli4_hba.lnk_info.lnk_no,
6013 phba->BIOSVersion, phba->sli4_hba.flash_id,
6014 phba->sli4_hba.asic_rev);
6015 out_free_mboxq:
6016 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
6017 lpfc_sli4_mbox_cmd_free(phba, mboxq);
6018 else
6019 mempool_free(mboxq, phba->mbox_mem_pool);
6020 return rc;
6021 }
6022
6023 /**
6024 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
6025 * @phba: pointer to lpfc hba data structure.
6026 *
6027 * This routine retrieves SLI4 device physical port name this PCI function
6028 * is attached to.
6029 *
6030 * Return codes
6031 * 0 - successful
6032 * otherwise - failed to retrieve physical port name
6033 **/
6034 static int
6035 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
6036 {
6037 LPFC_MBOXQ_t *mboxq;
6038 struct lpfc_mbx_get_port_name *get_port_name;
6039 uint32_t shdr_status, shdr_add_status;
6040 union lpfc_sli4_cfg_shdr *shdr;
6041 char cport_name = 0;
6042 int rc;
6043
6044 /* We assume nothing at this point */
6045 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
6046 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
6047
6048 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6049 if (!mboxq)
6050 return -ENOMEM;
6051 /* obtain link type and link number via READ_CONFIG */
6052 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
6053 lpfc_sli4_read_config(phba);
6054
6055 if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG)
6056 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
6057
6058 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
6059 goto retrieve_ppname;
6060
6061 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
6062 rc = lpfc_sli4_get_ctl_attr(phba);
6063 if (rc)
6064 goto out_free_mboxq;
6065
6066 retrieve_ppname:
6067 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6068 LPFC_MBOX_OPCODE_GET_PORT_NAME,
6069 sizeof(struct lpfc_mbx_get_port_name) -
6070 sizeof(struct lpfc_sli4_cfg_mhdr),
6071 LPFC_SLI4_MBX_EMBED);
6072 get_port_name = &mboxq->u.mqe.un.get_port_name;
6073 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
6074 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
6075 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
6076 phba->sli4_hba.lnk_info.lnk_tp);
6077 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6078 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6079 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6080 if (shdr_status || shdr_add_status || rc) {
6081 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6082 "3087 Mailbox x%x (x%x/x%x) failed: "
6083 "rc:x%x, status:x%x, add_status:x%x\n",
6084 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6085 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
6086 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
6087 rc, shdr_status, shdr_add_status);
6088 rc = -ENXIO;
6089 goto out_free_mboxq;
6090 }
6091 switch (phba->sli4_hba.lnk_info.lnk_no) {
6092 case LPFC_LINK_NUMBER_0:
6093 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
6094 &get_port_name->u.response);
6095 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6096 break;
6097 case LPFC_LINK_NUMBER_1:
6098 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
6099 &get_port_name->u.response);
6100 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6101 break;
6102 case LPFC_LINK_NUMBER_2:
6103 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
6104 &get_port_name->u.response);
6105 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6106 break;
6107 case LPFC_LINK_NUMBER_3:
6108 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
6109 &get_port_name->u.response);
6110 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6111 break;
6112 default:
6113 break;
6114 }
6115
6116 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
6117 phba->Port[0] = cport_name;
6118 phba->Port[1] = '\0';
6119 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6120 "3091 SLI get port name: %s\n", phba->Port);
6121 }
6122
6123 out_free_mboxq:
6124 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
6125 lpfc_sli4_mbox_cmd_free(phba, mboxq);
6126 else
6127 mempool_free(mboxq, phba->mbox_mem_pool);
6128 return rc;
6129 }
6130
6131 /**
6132 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
6133 * @phba: pointer to lpfc hba data structure.
6134 *
6135 * This routine is called to explicitly arm the SLI4 device's completion and
6136 * event queues.
6137 **/
6138 static void
6139 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
6140 {
6141 int qidx;
6142 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
6143 struct lpfc_sli4_hdw_queue *qp;
6144 struct lpfc_queue *eq;
6145
6146 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
6147 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
6148 if (sli4_hba->nvmels_cq)
6149 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
6150 LPFC_QUEUE_REARM);
6151
6152 if (sli4_hba->hdwq) {
6153 /* Loop thru all Hardware Queues */
6154 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
6155 qp = &sli4_hba->hdwq[qidx];
6156 /* ARM the corresponding CQ */
6157 sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
6158 LPFC_QUEUE_REARM);
6159 }
6160
6161 /* Loop thru all IRQ vectors */
6162 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
6163 eq = sli4_hba->hba_eq_hdl[qidx].eq;
6164 /* ARM the corresponding EQ */
6165 sli4_hba->sli4_write_eq_db(phba, eq,
6166 0, LPFC_QUEUE_REARM);
6167 }
6168 }
6169
6170 if (phba->nvmet_support) {
6171 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
6172 sli4_hba->sli4_write_cq_db(phba,
6173 sli4_hba->nvmet_cqset[qidx], 0,
6174 LPFC_QUEUE_REARM);
6175 }
6176 }
6177 }
6178
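/*
 * Doorbell sketch (assumed helper semantics): both write helpers take the
 * queue, a count of consumed entries, and an arm flag; a count of 0 with
 * LPFC_QUEUE_REARM re-enables interrupt generation without releasing any
 * entries, which is all this routine needs:
 *
 *	sli4_hba->sli4_write_cq_db(phba, cq, 0, LPFC_QUEUE_REARM);
 *	sli4_hba->sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
 */
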
6179 /**
6180 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
6181 * @phba: Pointer to HBA context object.
6182 * @type: The resource extent type.
6183 * @extnt_count: buffer to hold port available extent count.
6184 * @extnt_size: buffer to hold element count per extent.
6185 *
6186 * This function calls the port and retrieves the number of available
6187 * extents and their size for a particular extent type.
6188 *
6189 * Returns: 0 if successful. Nonzero otherwise.
6190 **/
6191 int
6192 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
6193 uint16_t *extnt_count, uint16_t *extnt_size)
6194 {
6195 int rc = 0;
6196 uint32_t length;
6197 uint32_t mbox_tmo;
6198 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
6199 LPFC_MBOXQ_t *mbox;
6200
6201 *extnt_count = 0;
6202 *extnt_size = 0;
6203
6204 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6205 if (!mbox)
6206 return -ENOMEM;
6207
6208 /* Find out how many extents are available for this resource type */
6209 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
6210 sizeof(struct lpfc_sli4_cfg_mhdr));
6211 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6212 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
6213 length, LPFC_SLI4_MBX_EMBED);
6214
6215 /* Send an extents count of 0 - the GET doesn't use it. */
6216 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6217 LPFC_SLI4_MBX_EMBED);
6218 if (unlikely(rc)) {
6219 rc = -EIO;
6220 goto err_exit;
6221 }
6222
6223 if (!phba->sli4_hba.intr_enable)
6224 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6225 else {
6226 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6227 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6228 }
6229 if (unlikely(rc)) {
6230 rc = -EIO;
6231 goto err_exit;
6232 }
6233
6234 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
6235 if (bf_get(lpfc_mbox_hdr_status,
6236 &rsrc_info->header.cfg_shdr.response)) {
6237 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6238 "2930 Failed to get resource extents "
6239 "Status 0x%x Add'l Status 0x%x\n",
6240 bf_get(lpfc_mbox_hdr_status,
6241 &rsrc_info->header.cfg_shdr.response),
6242 bf_get(lpfc_mbox_hdr_add_status,
6243 &rsrc_info->header.cfg_shdr.response));
6244 rc = -EIO;
6245 goto err_exit;
6246 }
6247
6248 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
6249 &rsrc_info->u.rsp);
6250 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
6251 &rsrc_info->u.rsp);
6252
6253 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6254 "3162 Retrieved extents type-%d from port: count:%d, "
6255 "size:%d\n", type, *extnt_count, *extnt_size);
6256
6257 err_exit:
6258 mempool_free(mbox, phba->mbox_mem_pool);
6259 return rc;
6260 }
6261
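/*
 * Usage sketch (an assumed caller): query the XRI extents and derive the
 * total number of XRI ids the port can provide.
 *
 *	uint16_t cnt, size;
 *
 *	if (!lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &cnt, &size))
 *		total_xris = cnt * size;	(ids across all extents)
 */
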
6262 /**
6263 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
6264 * @phba: Pointer to HBA context object.
6265 * @type: The extent type to check.
6266 *
6267 * This function reads the current available extents from the port and checks
6268 * if the extent count or extent size has changed since the last access.
6269 * Callers use this routine after a port reset to determine whether there is
6270 * an extent reprovisioning requirement.
6271 *
6272 * Returns:
6273 * -Error: error indicates problem.
6274 * 1: Extent count or size has changed.
6275 * 0: No changes.
6276 **/
6277 static int
6278 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
6279 {
6280 uint16_t curr_ext_cnt, rsrc_ext_cnt;
6281 uint16_t size_diff, rsrc_ext_size;
6282 int rc = 0;
6283 struct lpfc_rsrc_blks *rsrc_entry;
6284 struct list_head *rsrc_blk_list = NULL;
6285
6286 size_diff = 0;
6287 curr_ext_cnt = 0;
6288 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6289 &rsrc_ext_cnt,
6290 &rsrc_ext_size);
6291 if (unlikely(rc))
6292 return -EIO;
6293
6294 switch (type) {
6295 case LPFC_RSC_TYPE_FCOE_RPI:
6296 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6297 break;
6298 case LPFC_RSC_TYPE_FCOE_VPI:
6299 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
6300 break;
6301 case LPFC_RSC_TYPE_FCOE_XRI:
6302 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6303 break;
6304 case LPFC_RSC_TYPE_FCOE_VFI:
6305 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6306 break;
6307 default:
6308 break;
6309 }
6310
6311 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
6312 curr_ext_cnt++;
6313 if (rsrc_entry->rsrc_size != rsrc_ext_size)
6314 size_diff++;
6315 }
6316
6317 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
6318 rc = 1;
6319
6320 return rc;
6321 }
6322
6323 /**
6324 * lpfc_sli4_cfg_post_extnts - Post an extents allocation request to the port
6325 * @phba: Pointer to HBA context object.
6326 * @extnt_cnt: number of available extents.
6327 * @type: the extent type (rpi, xri, vfi, vpi).
6328 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
6329 * @mbox: pointer to the caller's allocated mailbox structure.
6330 *
6331 * This function executes the extents allocation request. It also
6332 * computes the amount of mailbox memory needed to request and receive
6333 * the allocated extents. It is the caller's responsibility to evaluate
6334 * the response.
6335 *
6336 * Returns:
6337 * -Error: Error value describes the condition found.
6338 * 0: if successful
6339 **/
6340 static int
6341 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
6342 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
6343 {
6344 int rc = 0;
6345 uint32_t req_len;
6346 uint32_t emb_len;
6347 uint32_t alloc_len, mbox_tmo;
6348
6349 /* Calculate the total requested length of the dma memory */
6350 req_len = extnt_cnt * sizeof(uint16_t);
6351
6352 /*
6353 * Calculate the size of an embedded mailbox. The uint32_t
6354 * accounts for the extents-specific word.
6355 */
6356 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6357 sizeof(uint32_t);
6358
6359 /*
6360 * Presume the allocation and response will fit into an embedded
6361 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6362 */
6363 *emb = LPFC_SLI4_MBX_EMBED;
6364 if (req_len > emb_len) {
6365 req_len = extnt_cnt * sizeof(uint16_t) +
6366 sizeof(union lpfc_sli4_cfg_shdr) +
6367 sizeof(uint32_t);
6368 *emb = LPFC_SLI4_MBX_NEMBED;
6369 }
6370
6371 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6372 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
6373 req_len, *emb);
6374 if (alloc_len < req_len) {
6375 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6376 "2982 Allocated DMA memory size (x%x) is "
6377 "less than the requested DMA memory "
6378 "size (x%x)\n", alloc_len, req_len);
6379 return -ENOMEM;
6380 }
6381 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
6382 if (unlikely(rc))
6383 return -EIO;
6384
6385 if (!phba->sli4_hba.intr_enable)
6386 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6387 else {
6388 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6389 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6390 }
6391
6392 if (unlikely(rc))
6393 rc = -EIO;
6394 return rc;
6395 }
6396
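/*
 * Worked example of the embed decision above (hypothetical counts): each
 * extent id is a uint16_t, so requesting 16 extents needs
 *
 *	16 * sizeof(uint16_t) == 32 bytes,
 *
 * which fits emb_len and stays LPFC_SLI4_MBX_EMBED; a request for several
 * hundred extents exceeds emb_len, so req_len is regrown by the cfg shdr
 * plus the extents word and the command is sent as LPFC_SLI4_MBX_NEMBED.
 */
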
6397 /**
6398 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
6399 * @phba: Pointer to HBA context object.
6400 * @type: The resource extent type to allocate.
6401 *
6402 * This function allocates the number of elements for the specified
6403 * resource type.
6404 **/
6405 static int
6406 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
6407 {
6408 bool emb = false;
6409 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
6410 uint16_t rsrc_id, rsrc_start, j, k;
6411 uint16_t *ids;
6412 int i, rc;
6413 unsigned long longs;
6414 unsigned long *bmask;
6415 struct lpfc_rsrc_blks *rsrc_blks;
6416 LPFC_MBOXQ_t *mbox;
6417 uint32_t length;
6418 struct lpfc_id_range *id_array = NULL;
6419 void *virtaddr = NULL;
6420 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6421 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6422 struct list_head *ext_blk_list;
6423
6424 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6425 &rsrc_cnt,
6426 &rsrc_size);
6427 if (unlikely(rc))
6428 return -EIO;
6429
6430 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
6431 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6432 "3009 No available Resource Extents "
6433 "for resource type 0x%x: Count: 0x%x, "
6434 "Size 0x%x\n", type, rsrc_cnt,
6435 rsrc_size);
6436 return -ENOMEM;
6437 }
6438
6439 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
6440 "2903 Post resource extents type-0x%x: "
6441 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
6442
6443 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6444 if (!mbox)
6445 return -ENOMEM;
6446
6447 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
6448 if (unlikely(rc)) {
6449 rc = -EIO;
6450 goto err_exit;
6451 }
6452
6453 /*
6454 * Figure out where the response is located. Then get local pointers
6455 * to the response data. The port is not guaranteed to honor the full
6456 * requested extent count, so update the local variable with the
6457 * count actually allocated by the port.
6458 */
6459 if (emb == LPFC_SLI4_MBX_EMBED) {
6460 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6461 id_array = &rsrc_ext->u.rsp.id[0];
6462 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6463 } else {
6464 virtaddr = mbox->sge_array->addr[0];
6465 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6466 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6467 id_array = &n_rsrc->id;
6468 }
6469
6470 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
6471 rsrc_id_cnt = rsrc_cnt * rsrc_size;
6472
6473 /*
6474 * Based on the resource size and count, correct the base and max
6475 * resource values.
6476 */
6477 length = sizeof(struct lpfc_rsrc_blks);
6478 switch (type) {
6479 case LPFC_RSC_TYPE_FCOE_RPI:
6480 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6481 sizeof(unsigned long),
6482 GFP_KERNEL);
6483 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6484 rc = -ENOMEM;
6485 goto err_exit;
6486 }
6487 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
6488 sizeof(uint16_t),
6489 GFP_KERNEL);
6490 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6491 kfree(phba->sli4_hba.rpi_bmask);
6492 rc = -ENOMEM;
6493 goto err_exit;
6494 }
6495
6496 /*
6497 * The next_rpi was initialized with the maximum available
6498 * count but the port may allocate a smaller number. Catch
6499 * that case and update the next_rpi.
6500 */
6501 phba->sli4_hba.next_rpi = rsrc_id_cnt;
6502
6503 /* Initialize local ptrs for common extent processing later. */
6504 bmask = phba->sli4_hba.rpi_bmask;
6505 ids = phba->sli4_hba.rpi_ids;
6506 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6507 break;
6508 case LPFC_RSC_TYPE_FCOE_VPI:
6509 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6510 GFP_KERNEL);
6511 if (unlikely(!phba->vpi_bmask)) {
6512 rc = -ENOMEM;
6513 goto err_exit;
6514 }
6515 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
6516 GFP_KERNEL);
6517 if (unlikely(!phba->vpi_ids)) {
6518 kfree(phba->vpi_bmask);
6519 rc = -ENOMEM;
6520 goto err_exit;
6521 }
6522
6523 /* Initialize local ptrs for common extent processing later. */
6524 bmask = phba->vpi_bmask;
6525 ids = phba->vpi_ids;
6526 ext_blk_list = &phba->lpfc_vpi_blk_list;
6527 break;
6528 case LPFC_RSC_TYPE_FCOE_XRI:
6529 phba->sli4_hba.xri_bmask = kcalloc(longs,
6530 sizeof(unsigned long),
6531 GFP_KERNEL);
6532 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6533 rc = -ENOMEM;
6534 goto err_exit;
6535 }
6536 phba->sli4_hba.max_cfg_param.xri_used = 0;
6537 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
6538 sizeof(uint16_t),
6539 GFP_KERNEL);
6540 if (unlikely(!phba->sli4_hba.xri_ids)) {
6541 kfree(phba->sli4_hba.xri_bmask);
6542 rc = -ENOMEM;
6543 goto err_exit;
6544 }
6545
6546 /* Initialize local ptrs for common extent processing later. */
6547 bmask = phba->sli4_hba.xri_bmask;
6548 ids = phba->sli4_hba.xri_ids;
6549 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6550 break;
6551 case LPFC_RSC_TYPE_FCOE_VFI:
6552 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6553 sizeof(unsigned long),
6554 GFP_KERNEL);
6555 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6556 rc = -ENOMEM;
6557 goto err_exit;
6558 }
6559 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6560 sizeof(uint16_t),
6561 GFP_KERNEL);
6562 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6563 kfree(phba->sli4_hba.vfi_bmask);
6564 rc = -ENOMEM;
6565 goto err_exit;
6566 }
6567
6568 /* Initialize local ptrs for common extent processing later. */
6569 bmask = phba->sli4_hba.vfi_bmask;
6570 ids = phba->sli4_hba.vfi_ids;
6571 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6572 break;
6573 default:
6574 /* Unsupported Opcode. Fail call. */
6575 id_array = NULL;
6576 bmask = NULL;
6577 ids = NULL;
6578 ext_blk_list = NULL;
6579 goto err_exit;
6580 }
6581
6582 /*
6583 * Complete initializing the extent configuration with the
6584 * allocated ids assigned to this function. The bitmask serves
6585 * as an index into the array and manages the available ids. The
6586 * array just stores the ids communicated to the port via the wqes.
6587 */
6588 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6589 if ((i % 2) == 0)
6590 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6591 &id_array[k]);
6592 else
6593 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6594 &id_array[k]);
6595
6596 rsrc_blks = kzalloc(length, GFP_KERNEL);
6597 if (unlikely(!rsrc_blks)) {
6598 rc = -ENOMEM;
6599 kfree(bmask);
6600 kfree(ids);
6601 goto err_exit;
6602 }
6603 rsrc_blks->rsrc_start = rsrc_id;
6604 rsrc_blks->rsrc_size = rsrc_size;
6605 list_add_tail(&rsrc_blks->list, ext_blk_list);
6606 rsrc_start = rsrc_id;
6607 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6608 phba->sli4_hba.io_xri_start = rsrc_start +
6609 lpfc_sli4_get_iocb_cnt(phba);
6610 }
6611
6612 while (rsrc_id < (rsrc_start + rsrc_size)) {
6613 ids[j] = rsrc_id;
6614 rsrc_id++;
6615 j++;
6616 }
6617 /* Entire word processed. Get next word.*/
6618 if ((i % 2) == 1)
6619 k++;
6620 }
6621 err_exit:
6622 lpfc_sli4_mbox_cmd_free(phba, mbox);
6623 return rc;
6624 }
6625
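/*
 * Reading aid for the id unpacking above (sketch): the response packs two
 * 16-bit extent base ids per 32-bit word, so even/odd extents share one
 * id_array slot:
 *
 *	i == 0: lpfc_mbx_rsrc_id_word4_0 of id_array[0]
 *	i == 1: lpfc_mbx_rsrc_id_word4_1 of id_array[0], then k++
 *	i == 2: lpfc_mbx_rsrc_id_word4_0 of id_array[1]
 *
 * Each base id then seeds rsrc_size consecutive ids into ids[].
 */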
6626
6627
6628 /**
6629 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6630 * @phba: Pointer to HBA context object.
6631 * @type: the extent's type.
6632 *
6633 * This function deallocates all extents of a particular resource type.
6634 * SLI4 does not allow for deallocating a particular extent range. It
6635 * is the caller's responsibility to release all kernel memory resources.
6636 **/
6637 static int
6638 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6639 {
6640 int rc;
6641 uint32_t length, mbox_tmo = 0;
6642 LPFC_MBOXQ_t *mbox;
6643 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6644 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6645
6646 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6647 if (!mbox)
6648 return -ENOMEM;
6649
6650 /*
6651 * This function sends an embedded mailbox because it only sends the
6652 * resource type. All extents of this type are released by the
6653 * port.
6654 */
6655 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6656 sizeof(struct lpfc_sli4_cfg_mhdr));
6657 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6658 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6659 length, LPFC_SLI4_MBX_EMBED);
6660
6661 /* Send an extents count of 0 - the dealloc doesn't use it. */
6662 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6663 LPFC_SLI4_MBX_EMBED);
6664 if (unlikely(rc)) {
6665 rc = -EIO;
6666 goto out_free_mbox;
6667 }
6668 if (!phba->sli4_hba.intr_enable)
6669 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6670 else {
6671 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6672 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6673 }
6674 if (unlikely(rc)) {
6675 rc = -EIO;
6676 goto out_free_mbox;
6677 }
6678
6679 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6680 if (bf_get(lpfc_mbox_hdr_status,
6681 &dealloc_rsrc->header.cfg_shdr.response)) {
6682 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6683 "2919 Failed to release resource extents "
6684 "for type %d - Status 0x%x Add'l Status 0x%x. "
6685 "Resource memory not released.\n",
6686 type,
6687 bf_get(lpfc_mbox_hdr_status,
6688 &dealloc_rsrc->header.cfg_shdr.response),
6689 bf_get(lpfc_mbox_hdr_add_status,
6690 &dealloc_rsrc->header.cfg_shdr.response));
6691 rc = -EIO;
6692 goto out_free_mbox;
6693 }
6694
6695 /* Release kernel memory resources for the specific type. */
6696 switch (type) {
6697 case LPFC_RSC_TYPE_FCOE_VPI:
6698 kfree(phba->vpi_bmask);
6699 kfree(phba->vpi_ids);
6700 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6701 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6702 &phba->lpfc_vpi_blk_list, list) {
6703 list_del_init(&rsrc_blk->list);
6704 kfree(rsrc_blk);
6705 }
6706 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6707 break;
6708 case LPFC_RSC_TYPE_FCOE_XRI:
6709 kfree(phba->sli4_hba.xri_bmask);
6710 kfree(phba->sli4_hba.xri_ids);
6711 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6712 &phba->sli4_hba.lpfc_xri_blk_list, list) {
6713 list_del_init(&rsrc_blk->list);
6714 kfree(rsrc_blk);
6715 }
6716 break;
6717 case LPFC_RSC_TYPE_FCOE_VFI:
6718 kfree(phba->sli4_hba.vfi_bmask);
6719 kfree(phba->sli4_hba.vfi_ids);
6720 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6721 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6722 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6723 list_del_init(&rsrc_blk->list);
6724 kfree(rsrc_blk);
6725 }
6726 break;
6727 case LPFC_RSC_TYPE_FCOE_RPI:
6728 /* RPI bitmask and physical id array are cleaned up earlier. */
6729 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6730 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6731 list_del_init(&rsrc_blk->list);
6732 kfree(rsrc_blk);
6733 }
6734 break;
6735 default:
6736 break;
6737 }
6738
6739 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6740
6741 out_free_mbox:
6742 mempool_free(mbox, phba->mbox_mem_pool);
6743 return rc;
6744 }
6745
6746 static void
6747 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6748 uint32_t feature)
6749 {
6750 uint32_t len;
6751 u32 sig_freq = 0;
6752
6753 len = sizeof(struct lpfc_mbx_set_feature) -
6754 sizeof(struct lpfc_sli4_cfg_mhdr);
6755 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6756 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6757 LPFC_SLI4_MBX_EMBED);
6758
6759 switch (feature) {
6760 case LPFC_SET_UE_RECOVERY:
6761 bf_set(lpfc_mbx_set_feature_UER,
6762 &mbox->u.mqe.un.set_feature, 1);
6763 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6764 mbox->u.mqe.un.set_feature.param_len = 8;
6765 break;
6766 case LPFC_SET_MDS_DIAGS:
6767 bf_set(lpfc_mbx_set_feature_mds,
6768 &mbox->u.mqe.un.set_feature, 1);
6769 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6770 &mbox->u.mqe.un.set_feature, 1);
6771 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6772 mbox->u.mqe.un.set_feature.param_len = 8;
6773 break;
6774 case LPFC_SET_CGN_SIGNAL:
6775 if (phba->cmf_active_mode == LPFC_CFG_OFF)
6776 sig_freq = 0;
6777 else
6778 sig_freq = phba->cgn_sig_freq;
6779
6780 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6781 bf_set(lpfc_mbx_set_feature_CGN_alarm_freq,
6782 &mbox->u.mqe.un.set_feature, sig_freq);
6783 bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6784 &mbox->u.mqe.un.set_feature, sig_freq);
6785 }
6786
6787 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY)
6788 bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6789 &mbox->u.mqe.un.set_feature, sig_freq);
6790
6791 if (phba->cmf_active_mode == LPFC_CFG_OFF ||
6792 phba->cgn_reg_signal == EDC_CG_SIG_NOTSUPPORTED)
6793 sig_freq = 0;
6794 else
6795 sig_freq = lpfc_acqe_cgn_frequency;
6796
6797 bf_set(lpfc_mbx_set_feature_CGN_acqe_freq,
6798 &mbox->u.mqe.un.set_feature, sig_freq);
6799
6800 mbox->u.mqe.un.set_feature.feature = LPFC_SET_CGN_SIGNAL;
6801 mbox->u.mqe.un.set_feature.param_len = 12;
6802 break;
6803 case LPFC_SET_DUAL_DUMP:
6804 bf_set(lpfc_mbx_set_feature_dd,
6805 &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6806 bf_set(lpfc_mbx_set_feature_ddquery,
6807 &mbox->u.mqe.un.set_feature, 0);
6808 mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6809 mbox->u.mqe.un.set_feature.param_len = 4;
6810 break;
6811 case LPFC_SET_ENABLE_MI:
6812 mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_MI;
6813 mbox->u.mqe.un.set_feature.param_len = 4;
6814 bf_set(lpfc_mbx_set_feature_milunq, &mbox->u.mqe.un.set_feature,
6815 phba->pport->cfg_lun_queue_depth);
6816 bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature,
6817 phba->sli4_hba.pc_sli4_params.mi_ver);
6818 break;
6819 case LPFC_SET_LD_SIGNAL:
6820 mbox->u.mqe.un.set_feature.feature = LPFC_SET_LD_SIGNAL;
6821 mbox->u.mqe.un.set_feature.param_len = 16;
6822 bf_set(lpfc_mbx_set_feature_lds_qry,
6823 &mbox->u.mqe.un.set_feature, LPFC_QUERY_LDS_OP);
6824 break;
6825 case LPFC_SET_ENABLE_CMF:
6826 mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF;
6827 mbox->u.mqe.un.set_feature.param_len = 4;
6828 bf_set(lpfc_mbx_set_feature_cmf,
6829 &mbox->u.mqe.un.set_feature, 1);
6830 break;
6831 }
6832 return;
6833 }
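/*
 * lpfc_set_features() only builds the mailbox; callers allocate the
 * mailbox, issue it, and free it themselves. A hedged sketch of the
 * typical polled call sequence (mirroring lpfc_cmf_setup() further
 * below; the helper name is hypothetical):
 */
#if 0	/* illustrative sketch, not part of the driver */
static int example_enable_cmf_feature(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Fill in the SET_FEATURES mailbox, then issue it synchronously. */
	lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_CMF);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	mempool_free(mboxq, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
#endif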
6834
6835 /**
6836 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
6837 * @phba: Pointer to HBA context object.
6838 *
6839 * Disable FW logging into host memory on the adapter. To
6840 * be done before reading logs from the host memory.
6841 **/
6842 void
6843 lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6844 {
6845 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6846
6847 spin_lock_irq(&phba->ras_fwlog_lock);
6848 ras_fwlog->state = INACTIVE;
6849 spin_unlock_irq(&phba->ras_fwlog_lock);
6850
6851 /* Disable FW logging to host memory */
6852 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6853 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6854
6855 /* Wait 10ms for firmware to stop using DMA buffer */
6856 usleep_range(10 * 1000, 20 * 1000);
6857 }
6858
6859 /**
6860 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6861 * @phba: Pointer to HBA context object.
6862 *
6863 * This function is called to free memory allocated for RAS FW logging
6864 * support in the driver.
6865 **/
6866 void
6867 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6868 {
6869 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6870 struct lpfc_dmabuf *dmabuf, *next;
6871
6872 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6873 list_for_each_entry_safe(dmabuf, next,
6874 &ras_fwlog->fwlog_buff_list,
6875 list) {
6876 list_del(&dmabuf->list);
6877 dma_free_coherent(&phba->pcidev->dev,
6878 LPFC_RAS_MAX_ENTRY_SIZE,
6879 dmabuf->virt, dmabuf->phys);
6880 kfree(dmabuf);
6881 }
6882 }
6883
6884 if (ras_fwlog->lwpd.virt) {
6885 dma_free_coherent(&phba->pcidev->dev,
6886 sizeof(uint32_t) * 2,
6887 ras_fwlog->lwpd.virt,
6888 ras_fwlog->lwpd.phys);
6889 ras_fwlog->lwpd.virt = NULL;
6890 }
6891
6892 spin_lock_irq(&phba->ras_fwlog_lock);
6893 ras_fwlog->state = INACTIVE;
6894 spin_unlock_irq(&phba->ras_fwlog_lock);
6895 }
6896
6897 /**
6898 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW logging support
6899 * @phba: Pointer to HBA context object.
6900 * @fwlog_buff_count: Count of buffers to be created.
6901 *
6902 * This routine allocates DMA memory for the Log Write Position Data (LWPD)
6903 * and for the buffers that are posted to the adapter for FW log updates.
6904 * Buffer count is calculated based on the module param ras_fwlog_buffsize.
6905 * Size of each buffer posted to FW is 64K.
6906 **/
6907
6908 static int
6909 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6910 uint32_t fwlog_buff_count)
6911 {
6912 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6913 struct lpfc_dmabuf *dmabuf;
6914 int rc = 0, i = 0;
6915
6916 /* Initialize List */
6917 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6918
6919 /* Allocate memory for the LWPD */
6920 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6921 sizeof(uint32_t) * 2,
6922 &ras_fwlog->lwpd.phys,
6923 GFP_KERNEL);
6924 if (!ras_fwlog->lwpd.virt) {
6925 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6926 "6185 LWPD Memory Alloc Failed\n");
6927
6928 return -ENOMEM;
6929 }
6930
6931 ras_fwlog->fw_buffcount = fwlog_buff_count;
6932 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6933 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6934 GFP_KERNEL);
6935 if (!dmabuf) {
6936 rc = -ENOMEM;
6937 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6938 "6186 Memory Alloc failed FW logging");
6939 goto free_mem;
6940 }
6941
6942 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6943 LPFC_RAS_MAX_ENTRY_SIZE,
6944 &dmabuf->phys, GFP_KERNEL);
6945 if (!dmabuf->virt) {
6946 kfree(dmabuf);
6947 rc = -ENOMEM;
6948 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6949 "6187 DMA Alloc Failed FW logging");
6950 goto free_mem;
6951 }
6952 dmabuf->buffer_tag = i;
6953 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6954 }
6955
6956 free_mem:
6957 if (rc)
6958 lpfc_sli4_ras_dma_free(phba);
6959
6960 return rc;
6961 }
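/*
 * The buffer count passed to this routine is derived in
 * lpfc_sli4_ras_fwlog_init() below as
 * (LPFC_RAS_MIN_BUFF_POST_SIZE * cfg_ras_fwlog_buffsize) /
 * LPFC_RAS_MAX_ENTRY_SIZE. A sketch of that arithmetic, assuming the
 * customary 256KB minimum post size and 64KB entry size:
 */
#if 0	/* illustrative sketch; the constant values are assumptions */
static u32 example_ras_buf_count(u32 cfg_ras_fwlog_buffsize)
{
	u32 fwlog_buffsize = (256 * 1024) * cfg_ras_fwlog_buffsize;

	return fwlog_buffsize / (64 * 1024);	/* e.g. cfg 4 -> 16 buffers */
}
#endif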
6962
6963 /**
6964 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
6965 * @phba: pointer to lpfc hba data structure.
6966 * @pmb: pointer to the driver internal queue element for mailbox command.
6967 *
6968 * Completion handler for driver's RAS MBX command to the device.
6969 **/
6970 static void
6971 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6972 {
6973 MAILBOX_t *mb;
6974 union lpfc_sli4_cfg_shdr *shdr;
6975 uint32_t shdr_status, shdr_add_status;
6976 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6977
6978 mb = &pmb->u.mb;
6979
6980 shdr = (union lpfc_sli4_cfg_shdr *)
6981 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6982 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6983 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6984
6985 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6986 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6987 "6188 FW LOG mailbox "
6988 "completed with status x%x add_status x%x,"
6989 " mbx status x%x\n",
6990 shdr_status, shdr_add_status, mb->mbxStatus);
6991
6992 ras_fwlog->ras_hwsupport = false;
6993 goto disable_ras;
6994 }
6995
6996 spin_lock_irq(&phba->ras_fwlog_lock);
6997 ras_fwlog->state = ACTIVE;
6998 spin_unlock_irq(&phba->ras_fwlog_lock);
6999 mempool_free(pmb, phba->mbox_mem_pool);
7000
7001 return;
7002
7003 disable_ras:
7004 /* Free RAS DMA memory */
7005 lpfc_sli4_ras_dma_free(phba);
7006 mempool_free(pmb, phba->mbox_mem_pool);
7007 }
7008
7009 /**
7010 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
7011 * @phba: pointer to lpfc hba data structure.
7012 * @fwlog_level: Logging verbosity level.
7013 * @fwlog_enable: Enable/Disable logging.
7014 *
7015 * Initialize memory and post mailbox command to enable FW logging in host
7016 * memory.
7017 **/
7018 int
7019 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
7020 uint32_t fwlog_level,
7021 uint32_t fwlog_enable)
7022 {
7023 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
7024 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
7025 struct lpfc_dmabuf *dmabuf;
7026 LPFC_MBOXQ_t *mbox;
7027 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
7028 int rc = 0;
7029
7030 spin_lock_irq(&phba->ras_fwlog_lock);
7031 ras_fwlog->state = INACTIVE;
7032 spin_unlock_irq(&phba->ras_fwlog_lock);
7033
7034 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
7035 phba->cfg_ras_fwlog_buffsize);
7036 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
7037
7038 /*
7039 * If re-enabling FW logging support, use the earlier allocated
7040 * DMA buffers while posting the MBX command.
7041 */
7042 if (!ras_fwlog->lwpd.virt) {
7043 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
7044 if (rc) {
7045 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7046 "6189 FW Log Memory Allocation Failed");
7047 return rc;
7048 }
7049 }
7050
7051 /* Setup Mailbox command */
7052 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7053 if (!mbox) {
7054 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7055 "6190 RAS MBX Alloc Failed");
7056 rc = -ENOMEM;
7057 goto mem_free;
7058 }
7059
7060 ras_fwlog->fw_loglevel = fwlog_level;
7061 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
7062 sizeof(struct lpfc_sli4_cfg_mhdr));
7063
7064 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
7065 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
7066 len, LPFC_SLI4_MBX_EMBED);
7067
7068 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
7069 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
7070 fwlog_enable);
7071 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
7072 ras_fwlog->fw_loglevel);
7073 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
7074 ras_fwlog->fw_buffcount);
7075 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
7076 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
7077
7078 /* Update DMA buffer address */
7079 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
7080 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
7081
7082 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
7083 putPaddrLow(dmabuf->phys);
7084
7085 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
7086 putPaddrHigh(dmabuf->phys);
7087 }
7088
7089 /* Update LWPD address */
7090 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
7091 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
7092
7093 spin_lock_irq(&phba->ras_fwlog_lock);
7094 ras_fwlog->state = REG_INPROGRESS;
7095 spin_unlock_irq(&phba->ras_fwlog_lock);
7096 mbox->vport = phba->pport;
7097 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
7098
7099 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
7100
7101 if (rc == MBX_NOT_FINISHED) {
7102 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7103 "6191 FW-Log Mailbox failed. "
7104 "status %d mbxStatus : x%x", rc,
7105 bf_get(lpfc_mqe_status, &mbox->u.mqe));
7106 mempool_free(mbox, phba->mbox_mem_pool);
7107 rc = -EIO;
7108 goto mem_free;
7109 } else
7110 rc = 0;
7111 mem_free:
7112 if (rc)
7113 lpfc_sli4_ras_dma_free(phba);
7114
7115 return rc;
7116 }
7117
7118 /**
7119 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
7120 * @phba: Pointer to HBA context object.
7121 *
7122 * Check if RAS is supported on the adapter and initialize it.
7123 **/
7124 void
7125 lpfc_sli4_ras_setup(struct lpfc_hba *phba)
7126 {
7127 /* Check RAS FW Log needs to be enabled or not */
7128 if (lpfc_check_fwlog_support(phba))
7129 return;
7130
7131 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
7132 LPFC_RAS_ENABLE_LOGGING);
7133 }
7134
7135 /**
7136 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
7137 * @phba: Pointer to HBA context object.
7138 *
7139 * This function allocates all SLI4 resource identifiers.
7140 **/
7141 int
7142 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
7143 {
7144 int i, rc, error = 0;
7145 uint16_t count, base;
7146 unsigned long longs;
7147
7148 if (!phba->sli4_hba.rpi_hdrs_in_use)
7149 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
7150 if (phba->sli4_hba.extents_in_use) {
7151 /*
7152 * The port supports resource extents. The XRI, VPI, VFI, RPI
7153 * resource extent count must be read and allocated before
7154 * provisioning the resource id arrays.
7155 */
7156 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
7157 LPFC_IDX_RSRC_RDY) {
7158 /*
7159 * Extent-based resources are set - the driver could
7160 * be in a port reset. Figure out if any corrective
7161 * actions need to be taken.
7162 */
7163 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7164 LPFC_RSC_TYPE_FCOE_VFI);
7165 if (rc != 0)
7166 error++;
7167 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7168 LPFC_RSC_TYPE_FCOE_VPI);
7169 if (rc != 0)
7170 error++;
7171 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7172 LPFC_RSC_TYPE_FCOE_XRI);
7173 if (rc != 0)
7174 error++;
7175 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7176 LPFC_RSC_TYPE_FCOE_RPI);
7177 if (rc != 0)
7178 error++;
7179
7180 /*
7181 * It's possible that the number of resources
7182 * provided to this port instance changed between
7183 * resets. Detect this condition and reallocate
7184 * resources. Otherwise, there is no action.
7185 */
7186 if (error) {
7187 lpfc_printf_log(phba, KERN_INFO,
7188 LOG_MBOX | LOG_INIT,
7189 "2931 Detected extent resource "
7190 "change. Reallocating all "
7191 "extents.\n");
7192 rc = lpfc_sli4_dealloc_extent(phba,
7193 LPFC_RSC_TYPE_FCOE_VFI);
7194 rc = lpfc_sli4_dealloc_extent(phba,
7195 LPFC_RSC_TYPE_FCOE_VPI);
7196 rc = lpfc_sli4_dealloc_extent(phba,
7197 LPFC_RSC_TYPE_FCOE_XRI);
7198 rc = lpfc_sli4_dealloc_extent(phba,
7199 LPFC_RSC_TYPE_FCOE_RPI);
7200 } else
7201 return 0;
7202 }
7203
7204 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7205 if (unlikely(rc))
7206 goto err_exit;
7207
7208 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7209 if (unlikely(rc))
7210 goto err_exit;
7211
7212 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7213 if (unlikely(rc))
7214 goto err_exit;
7215
7216 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7217 if (unlikely(rc))
7218 goto err_exit;
7219 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
7220 LPFC_IDX_RSRC_RDY);
7221 return rc;
7222 } else {
7223 /*
7224 * The port does not support resource extents. The XRI, VPI,
7225 * VFI, RPI resource ids were determined from READ_CONFIG.
7226 * Just allocate the bitmasks and provision the resource id
7227 * arrays. If a port reset is active, the resources don't
7228 * need any action - just exit.
7229 */
7230 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
7231 LPFC_IDX_RSRC_RDY) {
7232 lpfc_sli4_dealloc_resource_identifiers(phba);
7233 lpfc_sli4_remove_rpis(phba);
7234 }
7235 /* RPIs. */
7236 count = phba->sli4_hba.max_cfg_param.max_rpi;
7237 if (count <= 0) {
7238 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7239 "3279 Invalid provisioning of "
7240 "rpi:%d\n", count);
7241 rc = -EINVAL;
7242 goto err_exit;
7243 }
7244 base = phba->sli4_hba.max_cfg_param.rpi_base;
7245 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7246 phba->sli4_hba.rpi_bmask = kcalloc(longs,
7247 sizeof(unsigned long),
7248 GFP_KERNEL);
7249 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
7250 rc = -ENOMEM;
7251 goto err_exit;
7252 }
7253 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
7254 GFP_KERNEL);
7255 if (unlikely(!phba->sli4_hba.rpi_ids)) {
7256 rc = -ENOMEM;
7257 goto free_rpi_bmask;
7258 }
7259
7260 for (i = 0; i < count; i++)
7261 phba->sli4_hba.rpi_ids[i] = base + i;
7262
7263 /* VPIs. */
7264 count = phba->sli4_hba.max_cfg_param.max_vpi;
7265 if (count <= 0) {
7266 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7267 "3280 Invalid provisioning of "
7268 "vpi:%d\n", count);
7269 rc = -EINVAL;
7270 goto free_rpi_ids;
7271 }
7272 base = phba->sli4_hba.max_cfg_param.vpi_base;
7273 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7274 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
7275 GFP_KERNEL);
7276 if (unlikely(!phba->vpi_bmask)) {
7277 rc = -ENOMEM;
7278 goto free_rpi_ids;
7279 }
7280 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
7281 GFP_KERNEL);
7282 if (unlikely(!phba->vpi_ids)) {
7283 rc = -ENOMEM;
7284 goto free_vpi_bmask;
7285 }
7286
7287 for (i = 0; i < count; i++)
7288 phba->vpi_ids[i] = base + i;
7289
7290 /* XRIs. */
7291 count = phba->sli4_hba.max_cfg_param.max_xri;
7292 if (count <= 0) {
7293 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7294 "3281 Invalid provisioning of "
7295 "xri:%d\n", count);
7296 rc = -EINVAL;
7297 goto free_vpi_ids;
7298 }
7299 base = phba->sli4_hba.max_cfg_param.xri_base;
7300 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7301 phba->sli4_hba.xri_bmask = kcalloc(longs,
7302 sizeof(unsigned long),
7303 GFP_KERNEL);
7304 if (unlikely(!phba->sli4_hba.xri_bmask)) {
7305 rc = -ENOMEM;
7306 goto free_vpi_ids;
7307 }
7308 phba->sli4_hba.max_cfg_param.xri_used = 0;
7309 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
7310 GFP_KERNEL);
7311 if (unlikely(!phba->sli4_hba.xri_ids)) {
7312 rc = -ENOMEM;
7313 goto free_xri_bmask;
7314 }
7315
7316 for (i = 0; i < count; i++)
7317 phba->sli4_hba.xri_ids[i] = base + i;
7318
7319 /* VFIs. */
7320 count = phba->sli4_hba.max_cfg_param.max_vfi;
7321 if (count <= 0) {
7322 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7323 "3282 Invalid provisioning of "
7324 "vfi:%d\n", count);
7325 rc = -EINVAL;
7326 goto free_xri_ids;
7327 }
7328 base = phba->sli4_hba.max_cfg_param.vfi_base;
7329 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7330 phba->sli4_hba.vfi_bmask = kcalloc(longs,
7331 sizeof(unsigned long),
7332 GFP_KERNEL);
7333 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
7334 rc = -ENOMEM;
7335 goto free_xri_ids;
7336 }
7337 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
7338 GFP_KERNEL);
7339 if (unlikely(!phba->sli4_hba.vfi_ids)) {
7340 rc = -ENOMEM;
7341 goto free_vfi_bmask;
7342 }
7343
7344 for (i = 0; i < count; i++)
7345 phba->sli4_hba.vfi_ids[i] = base + i;
7346
7347 /*
7348 * Mark all resources ready. An HBA reset doesn't need
7349 * to reset the initialization.
7350 */
7351 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
7352 LPFC_IDX_RSRC_RDY);
7353 return 0;
7354 }
7355
7356 free_vfi_bmask:
7357 kfree(phba->sli4_hba.vfi_bmask);
7358 phba->sli4_hba.vfi_bmask = NULL;
7359 free_xri_ids:
7360 kfree(phba->sli4_hba.xri_ids);
7361 phba->sli4_hba.xri_ids = NULL;
7362 free_xri_bmask:
7363 kfree(phba->sli4_hba.xri_bmask);
7364 phba->sli4_hba.xri_bmask = NULL;
7365 free_vpi_ids:
7366 kfree(phba->vpi_ids);
7367 phba->vpi_ids = NULL;
7368 free_vpi_bmask:
7369 kfree(phba->vpi_bmask);
7370 phba->vpi_bmask = NULL;
7371 free_rpi_ids:
7372 kfree(phba->sli4_hba.rpi_ids);
7373 phba->sli4_hba.rpi_ids = NULL;
7374 free_rpi_bmask:
7375 kfree(phba->sli4_hba.rpi_bmask);
7376 phba->sli4_hba.rpi_bmask = NULL;
7377 err_exit:
7378 return rc;
7379 }
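/*
 * The bitmask/id-array pairs provisioned above are consumed elsewhere in
 * the driver by find-first-zero-bit style allocators: the bitmask tracks
 * which slots are in use, and the array maps a slot to the port-assigned
 * id. A sketch of that allocation step (hypothetical helper; the real
 * allocators also take locks and update usage counters):
 */
#if 0	/* illustrative sketch, not part of the driver */
static int example_alloc_id(unsigned long *bmask, uint16_t *ids, int max)
{
	int slot = find_first_zero_bit(bmask, max);

	if (slot >= max)
		return -1;		/* no free ids left */
	set_bit(slot, bmask);
	return ids[slot];		/* port-assigned id for this slot */
}
#endif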
7380
7381 /**
7382 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
7383 * @phba: Pointer to HBA context object.
7384 *
7385 * This function deallocates all resource identifiers held by the port,
7386 * releasing either the resource extents or the local id arrays.
7387 **/
7388 int
7389 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
7390 {
7391 if (phba->sli4_hba.extents_in_use) {
7392 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7393 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7394 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7395 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7396 } else {
7397 kfree(phba->vpi_bmask);
7398 phba->sli4_hba.max_cfg_param.vpi_used = 0;
7399 kfree(phba->vpi_ids);
7400 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7401 kfree(phba->sli4_hba.xri_bmask);
7402 kfree(phba->sli4_hba.xri_ids);
7403 kfree(phba->sli4_hba.vfi_bmask);
7404 kfree(phba->sli4_hba.vfi_ids);
7405 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7406 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7407 }
7408
7409 return 0;
7410 }
7411
7412 /**
7413 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
7414 * @phba: Pointer to HBA context object.
7415 * @type: The resource extent type.
7416 * @extnt_cnt: buffer to hold port extent count response
7417 * @extnt_size: buffer to hold port extent size response.
7418 *
7419 * This function calls the port to read the host allocated extents
7420 * for a particular type.
7421 **/
7422 int
7423 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
7424 uint16_t *extnt_cnt, uint16_t *extnt_size)
7425 {
7426 bool emb;
7427 int rc = 0;
7428 uint16_t curr_blks = 0;
7429 uint32_t req_len, emb_len;
7430 uint32_t alloc_len, mbox_tmo;
7431 struct list_head *blk_list_head;
7432 struct lpfc_rsrc_blks *rsrc_blk;
7433 LPFC_MBOXQ_t *mbox;
7434 void *virtaddr = NULL;
7435 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
7436 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
7437 union lpfc_sli4_cfg_shdr *shdr;
7438
7439 switch (type) {
7440 case LPFC_RSC_TYPE_FCOE_VPI:
7441 blk_list_head = &phba->lpfc_vpi_blk_list;
7442 break;
7443 case LPFC_RSC_TYPE_FCOE_XRI:
7444 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
7445 break;
7446 case LPFC_RSC_TYPE_FCOE_VFI:
7447 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
7448 break;
7449 case LPFC_RSC_TYPE_FCOE_RPI:
7450 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
7451 break;
7452 default:
7453 return -EIO;
7454 }
7455
7456 /* Count the number of extents currently allocated for this type. */
7457 list_for_each_entry(rsrc_blk, blk_list_head, list) {
7458 if (curr_blks == 0) {
7459 /*
7460 * The GET_ALLOCATED mailbox does not return the size,
7461 * just the count. The size is the same for all extents of a
7462 * given type, so take it from the first allocated block and
7463 * set the return value now.
7465 */
7466 *extnt_size = rsrc_blk->rsrc_size;
7467 }
7468 curr_blks++;
7469 }
7470
7471 /*
7472 * Calculate the size of an embedded mailbox. The uint32_t
7473 * accounts for extents-specific word.
7474 */
7475 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
7476 sizeof(uint32_t);
7477
7478 /*
7479 * Presume the allocation and response will fit into an embedded
7480 * mailbox. If not true, reconfigure to a non-embedded mailbox.
7481 */
7482 emb = LPFC_SLI4_MBX_EMBED;
7483 req_len = curr_blks * sizeof(uint16_t) + sizeof(uint32_t);
7484 if (req_len > emb_len) {
7485 req_len = curr_blks * sizeof(uint16_t) +
7486 sizeof(union lpfc_sli4_cfg_shdr) +
7487 sizeof(uint32_t);
7488 emb = LPFC_SLI4_MBX_NEMBED;
7489 }
7490
7491 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7492 if (!mbox)
7493 return -ENOMEM;
7494 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
7495
7496 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7497 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
7498 req_len, emb);
7499 if (alloc_len < req_len) {
7500 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7501 "2983 Allocated DMA memory size (x%x) is "
7502 "less than the requested DMA memory "
7503 "size (x%x)\n", alloc_len, req_len);
7504 rc = -ENOMEM;
7505 goto err_exit;
7506 }
7507 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
7508 if (unlikely(rc)) {
7509 rc = -EIO;
7510 goto err_exit;
7511 }
7512
7513 if (!phba->sli4_hba.intr_enable)
7514 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
7515 else {
7516 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
7517 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
7518 }
7519
7520 if (unlikely(rc)) {
7521 rc = -EIO;
7522 goto err_exit;
7523 }
7524
7525 /*
7526 * Figure out where the response is located. Then get local pointers
7527 * to the response data. The port does not guarantee a response for
7528 * every extent count requested, so update the local variable with the
7529 * allocated count from the port.
7530 */
7531 if (emb == LPFC_SLI4_MBX_EMBED) {
7532 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
7533 shdr = &rsrc_ext->header.cfg_shdr;
7534 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
7535 } else {
7536 virtaddr = mbox->sge_array->addr[0];
7537 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
7538 shdr = &n_rsrc->cfg_shdr;
7539 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
7540 }
7541
7542 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
7543 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7544 "2984 Failed to read allocated resources "
7545 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
7546 type,
7547 bf_get(lpfc_mbox_hdr_status, &shdr->response),
7548 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
7549 rc = -EIO;
7550 goto err_exit;
7551 }
7552 err_exit:
7553 lpfc_sli4_mbox_cmd_free(phba, mbox);
7554 return rc;
7555 }
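/*
 * The routine above follows a common SLI4 config pattern: compute the
 * payload the command needs, use an embedded mailbox if it fits, and
 * fall back to a non-embedded (SGE-based) command otherwise. A compact
 * sketch of that decision (hypothetical helper):
 */
#if 0	/* illustrative sketch, not part of the driver */
static bool example_fits_embedded(u32 payload_len, u32 *req_len)
{
	/* Space left in an embedded mailbox after the header words. */
	u32 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
		      sizeof(uint32_t);

	if (payload_len <= emb_len) {
		*req_len = payload_len;
		return true;			/* LPFC_SLI4_MBX_EMBED */
	}
	*req_len = payload_len + sizeof(union lpfc_sli4_cfg_shdr);
	return false;				/* LPFC_SLI4_MBX_NEMBED */
}
#endif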
7556
7557 /**
7558 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
7559 * @phba: pointer to lpfc hba data structure.
7560 * @sgl_list: linked link of sgl buffers to post
7561 * @cnt: number of linked list buffers
7562 *
7563 * This routine walks the list of buffers that have been allocated and
7564 * reposts them to the port by using SGL block post. This is needed after a
7565 * pci_function_reset/warm_start or start. It attempts to construct blocks
7566 * of buffer sgls which contain contiguous xris and uses the non-embedded
7567 * SGL block post mailbox commands to post them to the port. For any single
7568 * buffer sgl with a non-contiguous xri, it uses the embedded SGL post
7569 * mailbox command instead.
7570 *
7571 * Returns: 0 = success, non-zero failure.
7572 **/
7573 static int
7574 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
7575 struct list_head *sgl_list, int cnt)
7576 {
7577 struct lpfc_sglq *sglq_entry = NULL;
7578 struct lpfc_sglq *sglq_entry_next = NULL;
7579 struct lpfc_sglq *sglq_entry_first = NULL;
7580 int status = 0, total_cnt;
7581 int post_cnt = 0, num_posted = 0, block_cnt = 0;
7582 int last_xritag = NO_XRI;
7583 LIST_HEAD(prep_sgl_list);
7584 LIST_HEAD(blck_sgl_list);
7585 LIST_HEAD(allc_sgl_list);
7586 LIST_HEAD(post_sgl_list);
7587 LIST_HEAD(free_sgl_list);
7588
7589 spin_lock_irq(&phba->hbalock);
7590 spin_lock(&phba->sli4_hba.sgl_list_lock);
7591 list_splice_init(sgl_list, &allc_sgl_list);
7592 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7593 spin_unlock_irq(&phba->hbalock);
7594
7595 total_cnt = cnt;
7596 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
7597 &allc_sgl_list, list) {
7598 list_del_init(&sglq_entry->list);
7599 block_cnt++;
7600 if ((last_xritag != NO_XRI) &&
7601 (sglq_entry->sli4_xritag != last_xritag + 1)) {
7602 /* a hole in xri block, form a sgl posting block */
7603 list_splice_init(&prep_sgl_list, &blck_sgl_list);
7604 post_cnt = block_cnt - 1;
7605 /* prepare list for next posting block */
7606 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7607 block_cnt = 1;
7608 } else {
7609 /* prepare list for next posting block */
7610 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7611 /* enough sgls for non-embed sgl mbox command */
7612 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
7613 list_splice_init(&prep_sgl_list,
7614 &blck_sgl_list);
7615 post_cnt = block_cnt;
7616 block_cnt = 0;
7617 }
7618 }
7619 num_posted++;
7620
7621 /* keep track of last sgl's xritag */
7622 last_xritag = sglq_entry->sli4_xritag;
7623
7624 /* end of repost sgl list condition for buffers */
7625 if (num_posted == total_cnt) {
7626 if (post_cnt == 0) {
7627 list_splice_init(&prep_sgl_list,
7628 &blck_sgl_list);
7629 post_cnt = block_cnt;
7630 } else if (block_cnt == 1) {
7631 status = lpfc_sli4_post_sgl(phba,
7632 sglq_entry->phys, 0,
7633 sglq_entry->sli4_xritag);
7634 if (!status) {
7635 /* successful, put sgl to posted list */
7636 list_add_tail(&sglq_entry->list,
7637 &post_sgl_list);
7638 } else {
7639 /* Failure, put sgl to free list */
7640 lpfc_printf_log(phba, KERN_WARNING,
7641 LOG_SLI,
7642 "3159 Failed to post "
7643 "sgl, xritag:x%x\n",
7644 sglq_entry->sli4_xritag);
7645 list_add_tail(&sglq_entry->list,
7646 &free_sgl_list);
7647 total_cnt--;
7648 }
7649 }
7650 }
7651
7652 /* continue until a nembed page worth of sgls */
7653 if (post_cnt == 0)
7654 continue;
7655
7656 /* post the buffer list sgls as a block */
7657 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7658 post_cnt);
7659
7660 if (!status) {
7661 /* success, put sgl list to posted sgl list */
7662 list_splice_init(&blck_sgl_list, &post_sgl_list);
7663 } else {
7664 /* Failure, put sgl list to free sgl list */
7665 sglq_entry_first = list_first_entry(&blck_sgl_list,
7666 struct lpfc_sglq,
7667 list);
7668 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7669 "3160 Failed to post sgl-list, "
7670 "xritag:x%x-x%x\n",
7671 sglq_entry_first->sli4_xritag,
7672 (sglq_entry_first->sli4_xritag +
7673 post_cnt - 1));
7674 list_splice_init(&blck_sgl_list, &free_sgl_list);
7675 total_cnt -= post_cnt;
7676 }
7677
7678 /* don't reset xritag due to hole in xri block */
7679 if (block_cnt == 0)
7680 last_xritag = NO_XRI;
7681
7682 /* reset sgl post count for next round of posting */
7683 post_cnt = 0;
7684 }
7685
7686 /* free the sgls failed to post */
7687 lpfc_free_sgl_list(phba, &free_sgl_list);
7688
7689 /* push sgls posted to the available list */
7690 if (!list_empty(&post_sgl_list)) {
7691 spin_lock_irq(&phba->hbalock);
7692 spin_lock(&phba->sli4_hba.sgl_list_lock);
7693 list_splice_init(&post_sgl_list, sgl_list);
7694 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7695 spin_unlock_irq(&phba->hbalock);
7696 } else {
7697 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7698 "3161 Failure to post sgl to port,status %x "
7699 "blkcnt %d totalcnt %d postcnt %d\n",
7700 status, block_cnt, total_cnt, post_cnt);
7701 return -EIO;
7702 }
7703
7704 /* return the number of XRIs actually posted */
7705 return total_cnt;
7706 }
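/*
 * The repost loop above batches sgls into blocks of consecutive XRIs so
 * one non-embedded mailbox can post up to LPFC_NEMBED_MBOX_SGL_CNT of
 * them; any gap in the XRI sequence closes the current block. A compact
 * array-based sketch of the same blocking rule (emit_block() is a
 * hypothetical stand-in for the mailbox post):
 */
#if 0	/* illustrative sketch, not part of the driver */
static void example_block_by_contiguity(const u16 *xri, int n, int max_blk)
{
	int start = 0, i;

	for (i = 1; i <= n; i++) {
		/* close the block on a gap, at max size, or at the end */
		if (i == n || xri[i] != xri[i - 1] + 1 ||
		    i - start == max_blk) {
			emit_block(&xri[start], i - start);
			start = i;
		}
	}
}
#endif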
7707
7708 /**
7709 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7710 * @phba: pointer to lpfc hba data structure.
7711 *
7712 * This routine walks the list of nvme buffers that have been allocated and
7713 * reposts them to the port by using SGL block post. This is needed after a
7714 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7715 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7716 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7717 *
7718 * Returns: 0 = success, non-zero failure.
7719 **/
7720 static int
7721 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7722 {
7723 LIST_HEAD(post_nblist);
7724 int num_posted, rc = 0;
7725
7726 /* get all NVME buffers need to repost to a local list */
7727 lpfc_io_buf_flush(phba, &post_nblist);
7728
7729 /* post the list of nvme buffer sgls to port if available */
7730 if (!list_empty(&post_nblist)) {
7731 num_posted = lpfc_sli4_post_io_sgl_list(
7732 phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7733 /* failed to post any nvme buffer, return error */
7734 if (num_posted == 0)
7735 rc = -EIO;
7736 }
7737 return rc;
7738 }
7739
7740 static void
7741 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7742 {
7743 uint32_t len;
7744
7745 len = sizeof(struct lpfc_mbx_set_host_data) -
7746 sizeof(struct lpfc_sli4_cfg_mhdr);
7747 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7748 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7749 LPFC_SLI4_MBX_EMBED);
7750
7751 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7752 mbox->u.mqe.un.set_host_data.param_len =
7753 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7754 snprintf(mbox->u.mqe.un.set_host_data.un.data,
7755 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7756 "Linux %s v"LPFC_DRIVER_VERSION,
7757 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7758 }
7759
7760 int
7761 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7762 struct lpfc_queue *drq, int count, int idx)
7763 {
7764 int rc, i;
7765 struct lpfc_rqe hrqe;
7766 struct lpfc_rqe drqe;
7767 struct lpfc_rqb *rqbp;
7768 unsigned long flags;
7769 struct rqb_dmabuf *rqb_buffer;
7770 LIST_HEAD(rqb_buf_list);
7771
7772 rqbp = hrq->rqbp;
7773 for (i = 0; i < count; i++) {
7774 spin_lock_irqsave(&phba->hbalock, flags);
7775 /* If RQ is already full, don't bother */
7776 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
7777 spin_unlock_irqrestore(&phba->hbalock, flags);
7778 break;
7779 }
7780 spin_unlock_irqrestore(&phba->hbalock, flags);
7781
7782 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7783 if (!rqb_buffer)
7784 break;
7785 rqb_buffer->hrq = hrq;
7786 rqb_buffer->drq = drq;
7787 rqb_buffer->idx = idx;
7788 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7789 }
7790
7791 spin_lock_irqsave(&phba->hbalock, flags);
7792 while (!list_empty(&rqb_buf_list)) {
7793 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7794 hbuf.list);
7795
7796 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7797 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7798 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7799 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7800 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7801 if (rc < 0) {
7802 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7803 "6421 Cannot post to HRQ %d: %x %x %x "
7804 "DRQ %x %x\n",
7805 hrq->queue_id,
7806 hrq->host_index,
7807 hrq->hba_index,
7808 hrq->entry_count,
7809 drq->host_index,
7810 drq->hba_index);
7811 rqbp->rqb_free_buffer(phba, rqb_buffer);
7812 } else {
7813 list_add_tail(&rqb_buffer->hbuf.list,
7814 &rqbp->rqb_buffer_list);
7815 rqbp->buffer_count++;
7816 }
7817 }
7818 spin_unlock_irqrestore(&phba->hbalock, flags);
7819 return 1;
7820 }
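/*
 * Each RQE above carries a 64-bit DMA address split into two 32-bit
 * halves via putPaddrLow()/putPaddrHigh(). A sketch of what that split
 * amounts to, assuming those macros are plain low/high word extractors:
 */
#if 0	/* illustrative sketch, not part of the driver */
static void example_split_dma_addr(dma_addr_t phys, u32 *lo, u32 *hi)
{
	*lo = lower_32_bits(phys);	/* cf. putPaddrLow(phys)  */
	*hi = upper_32_bits(phys);	/* cf. putPaddrHigh(phys) */
}
#endif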
7821
7822 static void
7823 lpfc_mbx_cmpl_read_lds_params(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7824 {
7825 union lpfc_sli4_cfg_shdr *shdr;
7826 u32 shdr_status, shdr_add_status;
7827
7828 shdr = (union lpfc_sli4_cfg_shdr *)
7829 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
7830 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7831 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7832 if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
7833 lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT | LOG_MBOX,
7834 "4622 SET_FEATURE (x%x) mbox failed, "
7835 "status x%x add_status x%x, mbx status x%x\n",
7836 LPFC_SET_LD_SIGNAL, shdr_status,
7837 shdr_add_status, pmb->u.mb.mbxStatus);
7838 phba->degrade_activate_threshold = 0;
7839 phba->degrade_deactivate_threshold = 0;
7840 phba->fec_degrade_interval = 0;
7841 goto out;
7842 }
7843
7844 phba->degrade_activate_threshold = pmb->u.mqe.un.set_feature.word7;
7845 phba->degrade_deactivate_threshold = pmb->u.mqe.un.set_feature.word8;
7846 phba->fec_degrade_interval = pmb->u.mqe.un.set_feature.word10;
7847
7848 lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT,
7849 "4624 Success: da x%x dd x%x interval x%x\n",
7850 phba->degrade_activate_threshold,
7851 phba->degrade_deactivate_threshold,
7852 phba->fec_degrade_interval);
7853 out:
7854 mempool_free(pmb, phba->mbox_mem_pool);
7855 }
7856
7857 int
7858 lpfc_read_lds_params(struct lpfc_hba *phba)
7859 {
7860 LPFC_MBOXQ_t *mboxq;
7861 int rc;
7862
7863 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7864 if (!mboxq)
7865 return -ENOMEM;
7866
7867 lpfc_set_features(phba, mboxq, LPFC_SET_LD_SIGNAL);
7868 mboxq->vport = phba->pport;
7869 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_lds_params;
7870 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
7871 if (rc == MBX_NOT_FINISHED) {
7872 mempool_free(mboxq, phba->mbox_mem_pool);
7873 return -EIO;
7874 }
7875 return 0;
7876 }
7877
7878 static void
7879 lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7880 {
7881 struct lpfc_vport *vport = pmb->vport;
7882 union lpfc_sli4_cfg_shdr *shdr;
7883 u32 shdr_status, shdr_add_status;
7884 u32 sig, acqe;
7885
7886 /* Two outcomes. (1) Set features was successful and EDC negotiation
7887 * is done. (2) The mailbox failed, so advertise FPIN support only.
7888 */
7889 shdr = (union lpfc_sli4_cfg_shdr *)
7890 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
7891 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7892 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7893 if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
7894 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
7895 "2516 CGN SET_FEATURE mbox failed with "
7896 "status x%x add_status x%x, mbx status x%x "
7897 "Reset Congestion to FPINs only\n",
7898 shdr_status, shdr_add_status,
7899 pmb->u.mb.mbxStatus);
7900 /* If there is a mbox error, move on to RDF */
7901 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
7902 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
7903 goto out;
7904 }
7905
7906 /* Zero out Congestion Signal ACQE counter */
7907 phba->cgn_acqe_cnt = 0;
7908
7909 acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq,
7910 &pmb->u.mqe.un.set_feature);
7911 sig = bf_get(lpfc_mbx_set_feature_CGN_warn_freq,
7912 &pmb->u.mqe.un.set_feature);
7913 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7914 "4620 SET_FEATURES Success: Freq: %ds %dms "
7915 " Reg: x%x x%x\n", acqe, sig,
7916 phba->cgn_reg_signal, phba->cgn_reg_fpin);
7917 out:
7918 mempool_free(pmb, phba->mbox_mem_pool);
7919
7920 /* Register for FPIN events from the fabric now that the
7921 * EDC common_set_features has completed.
7922 */
7923 lpfc_issue_els_rdf(vport, 0);
7924 }
7925
7926 int
7927 lpfc_config_cgn_signal(struct lpfc_hba *phba)
7928 {
7929 LPFC_MBOXQ_t *mboxq;
7930 u32 rc;
7931
7932 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7933 if (!mboxq)
7934 goto out_rdf;
7935
7936 lpfc_set_features(phba, mboxq, LPFC_SET_CGN_SIGNAL);
7937 mboxq->vport = phba->pport;
7938 mboxq->mbox_cmpl = lpfc_mbx_cmpl_cgn_set_ftrs;
7939
7940 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7941 "4621 SET_FEATURES: FREQ sig x%x acqe x%x: "
7942 "Reg: x%x x%x\n",
7943 phba->cgn_sig_freq, lpfc_acqe_cgn_frequency,
7944 phba->cgn_reg_signal, phba->cgn_reg_fpin);
7945
7946 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
7947 if (rc == MBX_NOT_FINISHED)
7948 goto out;
7949 return 0;
7950
7951 out:
7952 mempool_free(mboxq, phba->mbox_mem_pool);
7953 out_rdf:
7954 /* If there is a mbox error, move on to RDF */
7955 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
7956 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
7957 lpfc_issue_els_rdf(phba->pport, 0);
7958 return -EIO;
7959 }
7960
7961 /**
7962 * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
7963 * @phba: pointer to lpfc hba data structure.
7964 *
7965 * This routine initializes the per-eq idle_stat to dynamically dictate
7966 * polling decisions.
7967 *
7968 * Return codes:
7969 * None
7970 **/
7971 static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
7972 {
7973 int i;
7974 struct lpfc_sli4_hdw_queue *hdwq;
7975 struct lpfc_queue *eq;
7976 struct lpfc_idle_stat *idle_stat;
7977 u64 wall;
7978
7979 for_each_present_cpu(i) {
7980 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
7981 eq = hdwq->hba_eq;
7982
7983 /* Skip if we've already handled this eq's primary CPU */
7984 if (eq->chann != i)
7985 continue;
7986
7987 idle_stat = &phba->sli4_hba.idle_stat[i];
7988
7989 idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
7990 idle_stat->prev_wall = wall;
7991
7992 if (phba->nvmet_support ||
7993 phba->cmf_active_mode != LPFC_CFG_OFF ||
7994 phba->intr_type != MSIX)
7995 eq->poll_mode = LPFC_QUEUE_WORK;
7996 else
7997 eq->poll_mode = LPFC_THREADED_IRQ;
7998 }
7999
8000 if (!phba->nvmet_support && phba->intr_type == MSIX)
8001 schedule_delayed_work(&phba->idle_stat_delay_work,
8002 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
8003 }
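/*
 * The per-CPU snapshot taken above is later diffed by the idle-stat
 * delayed work to estimate how idle each EQ's CPU has been:
 * idle% ~= (idle_now - prev_idle) / (wall_now - prev_wall). A sketch of
 * that delta computation (hypothetical helper):
 */
#if 0	/* illustrative sketch, not part of the driver */
static u32 example_idle_percent(u64 prev_idle, u64 prev_wall,
				u64 idle, u64 wall)
{
	u64 diff_idle = idle - prev_idle;
	u64 diff_wall = wall - prev_wall;

	if (!diff_wall)
		return 0;
	return (u32)div64_u64(diff_idle * 100, diff_wall);
}
#endif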
8004
8005 static void lpfc_sli4_dip(struct lpfc_hba *phba)
8006 {
8007 uint32_t if_type;
8008
8009 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8010 if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
8011 if_type == LPFC_SLI_INTF_IF_TYPE_6) {
8012 struct lpfc_register reg_data;
8013
8014 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
8015 &reg_data.word0))
8016 return;
8017
8018 if (bf_get(lpfc_sliport_status_dip, &reg_data))
8019 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8020 "2904 Firmware Dump Image Present"
8021 " on Adapter");
8022 }
8023 }
8024
8025 /**
8026 * lpfc_rx_monitor_create_ring - Initialize ring buffer for rx_monitor
8027 * @rx_monitor: Pointer to lpfc_rx_info_monitor object
8028 * @entries: Number of rx_info_entry objects to allocate in ring
8029 *
8030 * Return:
8031 * 0 - Success
8032 * -ENOMEM - Failure to kmalloc
8033 **/
8034 int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor,
8035 u32 entries)
8036 {
8037 rx_monitor->ring = kmalloc_array(entries, sizeof(struct rx_info_entry),
8038 GFP_KERNEL);
8039 if (!rx_monitor->ring)
8040 return -ENOMEM;
8041
8042 rx_monitor->head_idx = 0;
8043 rx_monitor->tail_idx = 0;
8044 spin_lock_init(&rx_monitor->lock);
8045 rx_monitor->entries = entries;
8046
8047 return 0;
8048 }
8049
8050 /**
8051 * lpfc_rx_monitor_destroy_ring - Free ring buffer for rx_monitor
8052 * @rx_monitor: Pointer to lpfc_rx_info_monitor object
8053 *
8054 * Called after cancellation of cmf_timer.
8055 **/
8056 void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor)
8057 {
8058 kfree(rx_monitor->ring);
8059 rx_monitor->ring = NULL;
8060 rx_monitor->entries = 0;
8061 rx_monitor->head_idx = 0;
8062 rx_monitor->tail_idx = 0;
8063 }
8064
8065 /**
8066 * lpfc_rx_monitor_record - Insert an entry into rx_monitor's ring
8067 * @rx_monitor: Pointer to lpfc_rx_info_monitor object
8068 * @entry: Pointer to rx_info_entry
8069 *
8070 * Used to insert an rx_info_entry into rx_monitor's ring. Note that this is a
8071 * deep copy of rx_info_entry, not a shallow copy of the rx_info_entry ptr.
8072 *
8073 * This is called from lpfc_cmf_timer, which is in timer/softirq context.
8074 *
8075 * In cases of old data overflow, we do a best effort of FIFO order.
8076 **/
8077 void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor,
8078 struct rx_info_entry *entry)
8079 {
8080 struct rx_info_entry *ring = rx_monitor->ring;
8081 u32 *head_idx = &rx_monitor->head_idx;
8082 u32 *tail_idx = &rx_monitor->tail_idx;
8083 spinlock_t *ring_lock = &rx_monitor->lock;
8084 u32 ring_size = rx_monitor->entries;
8085
8086 spin_lock(ring_lock);
8087 memcpy(&ring[*tail_idx], entry, sizeof(*entry));
8088 *tail_idx = (*tail_idx + 1) % ring_size;
8089
8090 /* Best effort of FIFO saved data */
8091 if (*tail_idx == *head_idx)
8092 *head_idx = (*head_idx + 1) % ring_size;
8093
8094 spin_unlock(ring_lock);
8095 }
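/*
 * The ring above always accepts the new entry at tail; when tail
 * catches up with head, head is advanced so the oldest record is
 * dropped, giving best-effort FIFO behavior under overflow. A
 * stand-alone sketch of that index policy (locking omitted):
 */
#if 0	/* illustrative sketch, not part of the driver */
static void example_ring_advance(u32 *head, u32 *tail, u32 size)
{
	/* The caller has just written the entry at slot *tail. */
	*tail = (*tail + 1) % size;
	if (*tail == *head)		/* full: drop the oldest entry */
		*head = (*head + 1) % size;
}
#endif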
8096
8097 /**
8098 * lpfc_rx_monitor_report - Read out rx_monitor's ring
8099 * @phba: Pointer to lpfc_hba object
8100 * @rx_monitor: Pointer to lpfc_rx_info_monitor object
8101 * @buf: Pointer to char buffer that will contain rx monitor info data
8102 * @buf_len: Length of buf, including the terminating null char
8103 * @max_read_entries: Maximum number of entries to read out of ring
8104 *
8105 * Used to dump/read what's in rx_monitor's ring buffer.
8106 *
8107 * If buf is NULL || buf_len == 0, then it is implied that we want to log the
8108 * information to kmsg instead of filling out buf.
8109 *
8110 * Return:
8111 * Number of entries read out of the ring
8112 **/
8113 u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
8114 struct lpfc_rx_info_monitor *rx_monitor, char *buf,
8115 u32 buf_len, u32 max_read_entries)
8116 {
8117 struct rx_info_entry *ring = rx_monitor->ring;
8118 struct rx_info_entry *entry;
8119 u32 *head_idx = &rx_monitor->head_idx;
8120 u32 *tail_idx = &rx_monitor->tail_idx;
8121 spinlock_t *ring_lock = &rx_monitor->lock;
8122 u32 ring_size = rx_monitor->entries;
8123 u32 cnt = 0;
8124 char tmp[DBG_LOG_STR_SZ] = {0};
8125 bool log_to_kmsg = (!buf || !buf_len) ? true : false;
8126
8127 if (!log_to_kmsg) {
8128 /* clear the buffer to be sure */
8129 memset(buf, 0, buf_len);
8130
8131 scnprintf(buf, buf_len, "\t%-16s%-16s%-16s%-16s%-8s%-8s%-8s"
8132 "%-8s%-8s%-8s%-16s\n",
8133 "MaxBPI", "Tot_Data_CMF",
8134 "Tot_Data_Cmd", "Tot_Data_Cmpl",
8135 "Lat(us)", "Avg_IO", "Max_IO", "Bsy",
8136 "IO_cnt", "Info", "BWutil(ms)");
8137 }
8138
8139 /* Needs to be _irq because record is called from timer interrupt
8140 * context
8141 */
8142 spin_lock_irq(ring_lock);
8143 while (*head_idx != *tail_idx) {
8144 entry = &ring[*head_idx];
8145
8146 /* Read out this entry's data. */
8147 if (!log_to_kmsg) {
8148 /* If !log_to_kmsg, then store to buf. */
8149 scnprintf(tmp, sizeof(tmp),
8150 "%03d:\t%-16llu%-16llu%-16llu%-16llu%-8llu"
8151 "%-8llu%-8llu%-8u%-8u%-8u%u(%u)\n",
8152 *head_idx, entry->max_bytes_per_interval,
8153 entry->cmf_bytes, entry->total_bytes,
8154 entry->rcv_bytes, entry->avg_io_latency,
8155 entry->avg_io_size, entry->max_read_cnt,
8156 entry->cmf_busy, entry->io_cnt,
8157 entry->cmf_info, entry->timer_utilization,
8158 entry->timer_interval);
8159
8160 /* Check for buffer overflow */
8161 if ((strlen(buf) + strlen(tmp)) >= buf_len)
8162 break;
8163
8164 /* Append entry's data to buffer */
8165 strlcat(buf, tmp, buf_len);
8166 } else {
8167 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
8168 "4410 %02u: MBPI %llu Xmit %llu "
8169 "Cmpl %llu Lat %llu ASz %llu Info %02u "
8170 "BWUtil %u Int %u slot %u\n",
8171 cnt, entry->max_bytes_per_interval,
8172 entry->total_bytes, entry->rcv_bytes,
8173 entry->avg_io_latency,
8174 entry->avg_io_size, entry->cmf_info,
8175 entry->timer_utilization,
8176 entry->timer_interval, *head_idx);
8177 }
8178
8179 *head_idx = (*head_idx + 1) % ring_size;
8180
8181 /* Don't feed more than max_read_entries */
8182 cnt++;
8183 if (cnt >= max_read_entries)
8184 break;
8185 }
8186 spin_unlock_irq(ring_lock);
8187
8188 return cnt;
8189 }
8190
8191 /**
8192 * lpfc_cmf_setup - Initialize idle_stat tracking
8193 * @phba: Pointer to HBA context object.
8194 *
8195 * This is called from HBA setup during driver load or when the HBA
8196 * comes online. This does all the initialization to support CMF and MI.
8197 **/
8198 static int
8199 lpfc_cmf_setup(struct lpfc_hba *phba)
8200 {
8201 LPFC_MBOXQ_t *mboxq;
8202 struct lpfc_dmabuf *mp;
8203 struct lpfc_pc_sli4_params *sli4_params;
8204 int rc, cmf, mi_ver;
8205
8206 rc = lpfc_sli4_refresh_params(phba);
8207 if (unlikely(rc))
8208 return rc;
8209
8210 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8211 if (!mboxq)
8212 return -ENOMEM;
8213
8214 sli4_params = &phba->sli4_hba.pc_sli4_params;
8215
8216 /* Always try to enable MI feature if we can */
8217 if (sli4_params->mi_ver) {
8218 lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI);
8219 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8220 mi_ver = bf_get(lpfc_mbx_set_feature_mi,
8221 &mboxq->u.mqe.un.set_feature);
8222
8223 if (rc == MBX_SUCCESS) {
8224 if (mi_ver) {
8225 lpfc_printf_log(phba,
8226 KERN_WARNING, LOG_CGN_MGMT,
8227 "6215 MI is enabled\n");
8228 sli4_params->mi_ver = mi_ver;
8229 } else {
8230 lpfc_printf_log(phba,
8231 KERN_WARNING, LOG_CGN_MGMT,
8232 "6338 MI is disabled\n");
8233 sli4_params->mi_ver = 0;
8234 }
8235 } else {
8236 /* mi_ver is already set from GET_SLI4_PARAMETERS */
8237 lpfc_printf_log(phba, KERN_INFO,
8238 LOG_CGN_MGMT | LOG_INIT,
8239 "6245 Enable MI Mailbox x%x (x%x/x%x) "
8240 "failed, rc:x%x mi:x%x\n",
8241 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8242 lpfc_sli_config_mbox_subsys_get
8243 (phba, mboxq),
8244 lpfc_sli_config_mbox_opcode_get
8245 (phba, mboxq),
8246 rc, sli4_params->mi_ver);
8247 }
8248 } else {
8249 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8250 "6217 MI is disabled\n");
8251 }
8252
8253 /* Ensure FDMI is enabled for MI if enable_mi is set */
8254 if (sli4_params->mi_ver)
8255 phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
8256
8257 /* Always try to enable CMF feature if we can */
8258 if (sli4_params->cmf) {
8259 lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_CMF);
8260 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8261 cmf = bf_get(lpfc_mbx_set_feature_cmf,
8262 &mboxq->u.mqe.un.set_feature);
8263 if (rc == MBX_SUCCESS && cmf) {
8264 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8265 "6218 CMF is enabled: mode %d\n",
8266 phba->cmf_active_mode);
8267 } else {
8268 lpfc_printf_log(phba, KERN_WARNING,
8269 LOG_CGN_MGMT | LOG_INIT,
8270 "6219 Enable CMF Mailbox x%x (x%x/x%x) "
8271 "failed, rc:x%x dd:x%x\n",
8272 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8273 lpfc_sli_config_mbox_subsys_get
8274 (phba, mboxq),
8275 lpfc_sli_config_mbox_opcode_get
8276 (phba, mboxq),
8277 rc, cmf);
8278 sli4_params->cmf = 0;
8279 phba->cmf_active_mode = LPFC_CFG_OFF;
8280 goto no_cmf;
8281 }
8282
8283 /* Allocate Congestion Information Buffer */
8284 if (!phba->cgn_i) {
8285 mp = kmalloc(sizeof(*mp), GFP_KERNEL);
8286 if (mp)
8287 mp->virt = dma_alloc_coherent
8288 (&phba->pcidev->dev,
8289 sizeof(struct lpfc_cgn_info),
8290 &mp->phys, GFP_KERNEL);
8291 if (!mp || !mp->virt) {
8292 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8293 "2640 Failed to alloc memory "
8294 "for Congestion Info\n");
8295 kfree(mp);
8296 sli4_params->cmf = 0;
8297 phba->cmf_active_mode = LPFC_CFG_OFF;
8298 goto no_cmf;
8299 }
8300 phba->cgn_i = mp;
8301
8302 /* initialize congestion buffer info */
8303 lpfc_init_congestion_buf(phba);
8304 lpfc_init_congestion_stat(phba);
8305
8306 /* Zero out Congestion Signal counters */
8307 atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
8308 atomic64_set(&phba->cgn_acqe_stat.warn, 0);
8309 }
8310
8311 rc = lpfc_sli4_cgn_params_read(phba);
8312 if (rc < 0) {
8313 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
8314 "6242 Error reading Cgn Params (%d)\n",
8315 rc);
8316 /* Ensure CGN Mode is off */
8317 sli4_params->cmf = 0;
8318 } else if (!rc) {
8319 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
8320 "6243 CGN Event empty object.\n");
8321 /* Ensure CGN Mode is off */
8322 sli4_params->cmf = 0;
8323 }
8324 } else {
8325 no_cmf:
8326 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8327 "6220 CMF is disabled\n");
8328 }
8329
8330 /* Only register congestion buffer with firmware if BOTH
8331 * CMF and E2E are enabled.
8332 */
8333 if (sli4_params->cmf && sli4_params->mi_ver) {
8334 rc = lpfc_reg_congestion_buf(phba);
8335 if (rc) {
8336 dma_free_coherent(&phba->pcidev->dev,
8337 sizeof(struct lpfc_cgn_info),
8338 phba->cgn_i->virt, phba->cgn_i->phys);
8339 kfree(phba->cgn_i);
8340 phba->cgn_i = NULL;
8341 /* Ensure CGN Mode is off */
8342 phba->cmf_active_mode = LPFC_CFG_OFF;
8343 sli4_params->cmf = 0;
8344 return 0;
8345 }
8346 }
8347 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8348 "6470 Setup MI version %d CMF %d mode %d\n",
8349 sli4_params->mi_ver, sli4_params->cmf,
8350 phba->cmf_active_mode);
8351
8352 mempool_free(mboxq, phba->mbox_mem_pool);
8353
8354 /* Initialize atomic counters */
8355 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
8356 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
8357 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
8358 atomic_set(&phba->cgn_sync_warn_cnt, 0);
8359 atomic_set(&phba->cgn_driver_evt_cnt, 0);
8360 atomic_set(&phba->cgn_latency_evt_cnt, 0);
8361 atomic64_set(&phba->cgn_latency_evt, 0);
8362
8363 phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
8364
8365 /* Allocate RX Monitor Buffer */
8366 if (!phba->rx_monitor) {
8367 phba->rx_monitor = kzalloc(sizeof(*phba->rx_monitor),
8368 GFP_KERNEL);
8369
8370 if (!phba->rx_monitor) {
8371 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8372 "2644 Failed to alloc memory "
8373 "for RX Monitor Buffer\n");
8374 return -ENOMEM;
8375 }
8376
8377 /* Instruct the rx_monitor object to instantiate its ring */
8378 if (lpfc_rx_monitor_create_ring(phba->rx_monitor,
8379 LPFC_MAX_RXMONITOR_ENTRY)) {
8380 kfree(phba->rx_monitor);
8381 phba->rx_monitor = NULL;
8382 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8383 "2645 Failed to alloc memory "
8384 "for RX Monitor's Ring\n");
8385 return -ENOMEM;
8386 }
8387 }
8388
8389 return 0;
8390 }
8391
8392 static int
8393 lpfc_set_host_tm(struct lpfc_hba *phba)
8394 {
8395 LPFC_MBOXQ_t *mboxq;
8396 uint32_t len, rc;
8397 struct timespec64 cur_time;
8398 struct tm broken;
8399 uint32_t month, day, year;
8400 uint32_t hour, minute, second;
8401 struct lpfc_mbx_set_host_date_time *tm;
8402
8403 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8404 if (!mboxq)
8405 return -ENOMEM;
8406
8407 len = sizeof(struct lpfc_mbx_set_host_data) -
8408 sizeof(struct lpfc_sli4_cfg_mhdr);
8409 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8410 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
8411 LPFC_SLI4_MBX_EMBED);
8412
8413 mboxq->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_DATE_TIME;
8414 mboxq->u.mqe.un.set_host_data.param_len =
8415 sizeof(struct lpfc_mbx_set_host_date_time);
8416 tm = &mboxq->u.mqe.un.set_host_data.un.tm;
8417 ktime_get_real_ts64(&cur_time);
8418 time64_to_tm(cur_time.tv_sec, 0, &broken);
8419 month = broken.tm_mon + 1;
8420 day = broken.tm_mday;
8421 year = broken.tm_year - 100;
8422 hour = broken.tm_hour;
8423 minute = broken.tm_min;
8424 second = broken.tm_sec;
8425 bf_set(lpfc_mbx_set_host_month, tm, month);
8426 bf_set(lpfc_mbx_set_host_day, tm, day);
8427 bf_set(lpfc_mbx_set_host_year, tm, year);
8428 bf_set(lpfc_mbx_set_host_hour, tm, hour);
8429 bf_set(lpfc_mbx_set_host_min, tm, minute);
8430 bf_set(lpfc_mbx_set_host_sec, tm, second);
8431
8432 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8433 mempool_free(mboxq, phba->mbox_mem_pool);
8434 return rc;
8435 }
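
/* lpfc_set_host_tm packs a broken-down UTC time into the mailbox: tm_mon
 * is 0-based so it adds 1, and tm_year counts from 1900 so subtracting
 * 100 yields a two-digit year relative to 2000. A runnable userspace
 * sketch of the same encoding, with time()/gmtime_r() standing in for
 * ktime_get_real_ts64()/time64_to_tm().
 */
#if 0
#include <stdio.h>
#include <time.h>

int main(void)
{
	time_t now = time(NULL);
	struct tm broken;

	gmtime_r(&now, &broken);
	printf("month=%d day=%d year=%d %02d:%02d:%02d\n",
	       broken.tm_mon + 1,		/* 1..12 */
	       broken.tm_mday,
	       broken.tm_year - 100,		/* years since 2000 */
	       broken.tm_hour, broken.tm_min, broken.tm_sec);
	return 0;
}
#endif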
8436
8437 /**
8438 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
8439 * @phba: Pointer to HBA context object.
8440 *
8441 * This function is the main SLI4 device initialization PCI function. This
8442 * function is called by the HBA initialization code, HBA reset code and
8443 * HBA error attention handler code. Caller is not required to hold any
8444 * locks.
8445 **/
8446 int
8447 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
8448 {
8449 int rc, i, cnt, len, dd;
8450 LPFC_MBOXQ_t *mboxq;
8451 struct lpfc_mqe *mqe;
8452 uint8_t *vpd;
8453 uint32_t vpd_size;
8454 uint32_t ftr_rsp = 0;
8455 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
8456 struct lpfc_vport *vport = phba->pport;
8457 struct lpfc_dmabuf *mp;
8458 struct lpfc_rqb *rqbp;
8459 u32 flg;
8460
8461 /* Perform a PCI function reset to start from clean */
8462 rc = lpfc_pci_function_reset(phba);
8463 if (unlikely(rc))
8464 return -ENODEV;
8465
8466 /* Check the HBA Host Status Register for readiness */
8467 rc = lpfc_sli4_post_status_check(phba);
8468 if (unlikely(rc))
8469 return -ENODEV;
8470 else {
8471 spin_lock_irq(&phba->hbalock);
8472 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
8473 flg = phba->sli.sli_flag;
8474 spin_unlock_irq(&phba->hbalock);
8475 /* Allow a little time after setting SLI_ACTIVE for any polled
8476 * MBX commands to complete via BSG.
8477 */
8478 for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) {
8479 msleep(20);
8480 spin_lock_irq(&phba->hbalock);
8481 flg = phba->sli.sli_flag;
8482 spin_unlock_irq(&phba->hbalock);
8483 }
8484 }
8485 phba->hba_flag &= ~HBA_SETUP;
8486
8487 lpfc_sli4_dip(phba);
8488
8489 /*
8490 * Allocate a single mailbox container for initializing the
8491 * port.
8492 */
8493 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8494 if (!mboxq)
8495 return -ENOMEM;
8496
8497 /* Issue READ_REV to collect vpd and FW information. */
8498 vpd_size = SLI4_PAGE_SIZE;
8499 vpd = kzalloc(vpd_size, GFP_KERNEL);
8500 if (!vpd) {
8501 rc = -ENOMEM;
8502 goto out_free_mbox;
8503 }
8504
8505 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
8506 if (unlikely(rc)) {
8507 kfree(vpd);
8508 goto out_free_mbox;
8509 }
8510
8511 mqe = &mboxq->u.mqe;
8512 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
8513 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
8514 phba->hba_flag |= HBA_FCOE_MODE;
8515 phba->fcp_embed_io = 0; /* SLI4 FC support only */
8516 } else {
8517 phba->hba_flag &= ~HBA_FCOE_MODE;
8518 }
8519
8520 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
8521 LPFC_DCBX_CEE_MODE)
8522 phba->hba_flag |= HBA_FIP_SUPPORT;
8523 else
8524 phba->hba_flag &= ~HBA_FIP_SUPPORT;
8525
8526 phba->hba_flag &= ~HBA_IOQ_FLUSH;
8527
8528 if (phba->sli_rev != LPFC_SLI_REV4) {
8529 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8530 "0376 READ_REV Error. SLI Level %d "
8531 "FCoE enabled %d\n",
8532 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
8533 rc = -EIO;
8534 kfree(vpd);
8535 goto out_free_mbox;
8536 }
8537
8538 rc = lpfc_set_host_tm(phba);
8539 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
8540 "6468 Set host date / time: Status x%x:\n", rc);
8541
8542 /*
8543 * Continue initialization with default values even if the driver failed
8544 * to read FCoE param config regions; only read the parameters if the
8545 * board is FCoE.
8546 */
8547 if (phba->hba_flag & HBA_FCOE_MODE &&
8548 lpfc_sli4_read_fcoe_params(phba))
8549 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
8550 "2570 Failed to read FCoE parameters\n");
8551
8552 /*
8553 * Retrieve the sli4 device physical port name; failure to do so
8554 * is considered non-fatal.
8555 */
8556 rc = lpfc_sli4_retrieve_pport_name(phba);
8557 if (!rc)
8558 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8559 "3080 Successful retrieving SLI4 device "
8560 "physical port name: %s.\n", phba->Port);
8561
8562 rc = lpfc_sli4_get_ctl_attr(phba);
8563 if (!rc)
8564 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8565 "8351 Successful retrieving SLI4 device "
8566 "CTL ATTR\n");
8567
8568 /*
8569 * Evaluate the read rev and vpd data. Populate the driver
8570 * state with the results. If this routine fails, the failure
8571 * is not fatal as the driver will use generic values.
8572 */
8573 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
8574 if (unlikely(!rc)) {
8575 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8576 "0377 Error %d parsing vpd. "
8577 "Using defaults.\n", rc);
8578 rc = 0;
8579 }
8580 kfree(vpd);
8581
8582 /* Save information as VPD data */
8583 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
8584 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
8585
8586 /*
8587 * The first G7 ASIC doesn't support the standard 0x5a NVME cmd
8588 * descriptor type/subtype, so disable embedded NVME commands on it.
8589 */
8590 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8591 LPFC_SLI_INTF_IF_TYPE_6) &&
8592 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
8593 (phba->vpd.rev.smRev == 0) &&
8594 (phba->cfg_nvme_embed_cmd == 1))
8595 phba->cfg_nvme_embed_cmd = 0;
8596
8597 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
8598 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
8599 &mqe->un.read_rev);
8600 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
8601 &mqe->un.read_rev);
8602 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
8603 &mqe->un.read_rev);
8604 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
8605 &mqe->un.read_rev);
8606 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
8607 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
8608 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
8609 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
8610 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
8611 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
8612 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8613 "(%d):0380 READ_REV Status x%x "
8614 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
8615 mboxq->vport ? mboxq->vport->vpi : 0,
8616 bf_get(lpfc_mqe_status, mqe),
8617 phba->vpd.rev.opFwName,
8618 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
8619 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
8620
8621 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8622 LPFC_SLI_INTF_IF_TYPE_0) {
8623 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
8624 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8625 if (rc == MBX_SUCCESS) {
8626 phba->hba_flag |= HBA_RECOVERABLE_UE;
8627 /* Set 1Sec interval to detect UE */
8628 phba->eratt_poll_interval = 1;
8629 phba->sli4_hba.ue_to_sr = bf_get(
8630 lpfc_mbx_set_feature_UESR,
8631 &mboxq->u.mqe.un.set_feature);
8632 phba->sli4_hba.ue_to_rp = bf_get(
8633 lpfc_mbx_set_feature_UERP,
8634 &mboxq->u.mqe.un.set_feature);
8635 }
8636 }
8637
8638 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
8639 /* Enable MDS Diagnostics only if the SLI Port supports it */
8640 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
8641 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8642 if (rc != MBX_SUCCESS)
8643 phba->mds_diags_support = 0;
8644 }
8645
8646 /*
8647 * Discover the port's supported feature set and match it against the
8648 * host's requests.
8649 */
8650 lpfc_request_features(phba, mboxq);
8651 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8652 if (unlikely(rc)) {
8653 rc = -EIO;
8654 goto out_free_mbox;
8655 }
8656
8657 /* Disable VMID if app header is not supported */
8658 if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr,
8659 &mqe->un.req_ftrs))) {
8660 bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0);
8661 phba->cfg_vmid_app_header = 0;
8662 lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI,
8663 "1242 vmid feature not supported\n");
8664 }
8665
8666 /*
8667 * The port must support FCP initiator mode as this is the
8668 * only mode running in the host.
8669 */
8670 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
8671 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8672 "0378 No support for fcpi mode.\n");
8673 ftr_rsp++;
8674 }
8675
8676 /* Performance Hints are ONLY for FCoE */
8677 if (phba->hba_flag & HBA_FCOE_MODE) {
8678 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
8679 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
8680 else
8681 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
8682 }
8683
8684 /*
8685 * If the port cannot support the host's requested features
8686 * then turn off the global config parameters to disable the
8687 * feature in the driver. This is not a fatal error.
8688 */
8689 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8690 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
8691 phba->cfg_enable_bg = 0;
8692 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
8693 ftr_rsp++;
8694 }
8695 }
8696
8697 if (phba->max_vpi && phba->cfg_enable_npiv &&
8698 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
8699 ftr_rsp++;
8700
8701 if (ftr_rsp) {
8702 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8703 "0379 Feature Mismatch Data: x%08x %08x "
8704 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
8705 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
8706 phba->cfg_enable_npiv, phba->max_vpi);
8707 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
8708 phba->cfg_enable_bg = 0;
8709 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
8710 phba->cfg_enable_npiv = 0;
8711 }
8712
8713 /* These SLI3 features are assumed in SLI4 */
8714 spin_lock_irq(&phba->hbalock);
8715 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
8716 spin_unlock_irq(&phba->hbalock);
8717
8718 /* Always try to enable dual dump feature if we can */
8719 lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
8720 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8721 dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
8722 if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
8723 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8724 "6448 Dual Dump is enabled\n");
8725 else
8726 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
8727 "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
8728 "rc:x%x dd:x%x\n",
8729 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8730 lpfc_sli_config_mbox_subsys_get(
8731 phba, mboxq),
8732 lpfc_sli_config_mbox_opcode_get(
8733 phba, mboxq),
8734 rc, dd);
8735 /*
8736 * Allocate all resources (xri, rpi, vpi, vfi) now. Subsequent
8737 * calls depend on these resources to complete port setup.
8738 */
8739 rc = lpfc_sli4_alloc_resource_identifiers(phba);
8740 if (rc) {
8741 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8742 "2920 Failed to alloc Resource IDs "
8743 "rc = x%x\n", rc);
8744 goto out_free_mbox;
8745 }
8746
8747 lpfc_set_host_data(phba, mboxq);
8748
8749 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8750 if (rc) {
8751 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8752 "2134 Failed to set host os driver version %x",
8753 rc);
8754 }
8755
8756 /* Read the port's service parameters. */
8757 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
8758 if (rc) {
8759 phba->link_state = LPFC_HBA_ERROR;
8760 rc = -ENOMEM;
8761 goto out_free_mbox;
8762 }
8763
8764 mboxq->vport = vport;
8765 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8766 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
8767 if (rc == MBX_SUCCESS) {
8768 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
8769 rc = 0;
8770 }
8771
8772 /*
8773 * This memory was allocated by the lpfc_read_sparam routine but is
8774 * no longer needed. It is released and ctx_buf NULLed to prevent
8775 * unintended pointer access as the mbox is reused.
8776 */
8777 lpfc_mbuf_free(phba, mp->virt, mp->phys);
8778 kfree(mp);
8779 mboxq->ctx_buf = NULL;
8780 if (unlikely(rc)) {
8781 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8782 "0382 READ_SPARAM command failed "
8783 "status %d, mbxStatus x%x\n",
8784 rc, bf_get(lpfc_mqe_status, mqe));
8785 phba->link_state = LPFC_HBA_ERROR;
8786 rc = -EIO;
8787 goto out_free_mbox;
8788 }
8789
8790 lpfc_update_vport_wwn(vport);
8791
8792 /* Update the fc_host data structures with new wwn. */
8793 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
8794 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
8795
8796 /* Create all the SLI4 queues */
8797 rc = lpfc_sli4_queue_create(phba);
8798 if (rc) {
8799 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8800 "3089 Failed to allocate queues\n");
8801 rc = -ENODEV;
8802 goto out_free_mbox;
8803 }
8804 /* Set up all the queues to the device */
8805 rc = lpfc_sli4_queue_setup(phba);
8806 if (unlikely(rc)) {
8807 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8808 "0381 Error %d during queue setup.\n ", rc);
8809 goto out_stop_timers;
8810 }
8811 /* Initialize the driver internal SLI layer lists. */
8812 lpfc_sli4_setup(phba);
8813 lpfc_sli4_queue_init(phba);
8814
8815 /* update host els xri-sgl sizes and mappings */
8816 rc = lpfc_sli4_els_sgl_update(phba);
8817 if (unlikely(rc)) {
8818 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8819 "1400 Failed to update xri-sgl size and "
8820 "mapping: %d\n", rc);
8821 goto out_destroy_queue;
8822 }
8823
8824 /* register the els sgl pool to the port */
8825 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
8826 phba->sli4_hba.els_xri_cnt);
8827 if (unlikely(rc < 0)) {
8828 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8829 "0582 Error %d during els sgl post "
8830 "operation\n", rc);
8831 rc = -ENODEV;
8832 goto out_destroy_queue;
8833 }
8834 phba->sli4_hba.els_xri_cnt = rc;
8835
8836 if (phba->nvmet_support) {
8837 /* update host nvmet xri-sgl sizes and mappings */
8838 rc = lpfc_sli4_nvmet_sgl_update(phba);
8839 if (unlikely(rc)) {
8840 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8841 "6308 Failed to update nvmet-sgl size "
8842 "and mapping: %d\n", rc);
8843 goto out_destroy_queue;
8844 }
8845
8846 /* register the nvmet sgl pool to the port */
8847 rc = lpfc_sli4_repost_sgl_list(
8848 phba,
8849 &phba->sli4_hba.lpfc_nvmet_sgl_list,
8850 phba->sli4_hba.nvmet_xri_cnt);
8851 if (unlikely(rc < 0)) {
8852 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8853 "3117 Error %d during nvmet "
8854 "sgl post\n", rc);
8855 rc = -ENODEV;
8856 goto out_destroy_queue;
8857 }
8858 phba->sli4_hba.nvmet_xri_cnt = rc;
8859
8860 /* We allocate an iocbq for every receive context SGL.
8861 * The additional allocation is for abort and ls handling.
8862 */
8863 cnt = phba->sli4_hba.nvmet_xri_cnt +
8864 phba->sli4_hba.max_cfg_param.max_xri;
8865 } else {
8866 /* update host common xri-sgl sizes and mappings */
8867 rc = lpfc_sli4_io_sgl_update(phba);
8868 if (unlikely(rc)) {
8869 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8870 "6082 Failed to update nvme-sgl size "
8871 "and mapping: %d\n", rc);
8872 goto out_destroy_queue;
8873 }
8874
8875 /* register the allocated common sgl pool to the port */
8876 rc = lpfc_sli4_repost_io_sgl_list(phba);
8877 if (unlikely(rc)) {
8878 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8879 "6116 Error %d during nvme sgl post "
8880 "operation\n", rc);
8881 /* Some NVME buffers were moved to abort nvme list */
8882 /* A pci function reset will repost them */
8883 rc = -ENODEV;
8884 goto out_destroy_queue;
8885 }
8886 /* Each lpfc_io_buf job structure has an iocbq element.
8887 * This cnt provides for abort, els, ct and ls requests.
8888 */
8889 cnt = phba->sli4_hba.max_cfg_param.max_xri;
8890 }
8891
8892 if (!phba->sli.iocbq_lookup) {
8893 /* Initialize and populate the iocb list per host */
8894 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8895 "2821 initialize iocb list with %d entries\n",
8896 cnt);
8897 rc = lpfc_init_iocb_list(phba, cnt);
8898 if (rc) {
8899 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8900 "1413 Failed to init iocb list.\n");
8901 goto out_destroy_queue;
8902 }
8903 }
8904
8905 if (phba->nvmet_support)
8906 lpfc_nvmet_create_targetport(phba);
8907
8908 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
8909 /* Post initial buffers to all RQs created */
8910 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
8911 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
8912 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
8913 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
8914 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
8915 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
8916 rqbp->buffer_count = 0;
8917
8918 lpfc_post_rq_buffer(
8919 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
8920 phba->sli4_hba.nvmet_mrq_data[i],
8921 phba->cfg_nvmet_mrq_post, i);
8922 }
8923 }
8924
8925 /* Post the rpi header region to the device. */
8926 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
8927 if (unlikely(rc)) {
8928 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8929 "0393 Error %d during rpi post operation\n",
8930 rc);
8931 rc = -ENODEV;
8932 goto out_free_iocblist;
8933 }
8934 lpfc_sli4_node_prep(phba);
8935
8936 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
8937 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
8938 /*
8939 * The FC Port needs to register FCFI (index 0)
8940 */
8941 lpfc_reg_fcfi(phba, mboxq);
8942 mboxq->vport = phba->pport;
8943 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8944 if (rc != MBX_SUCCESS)
8945 goto out_unset_queue;
8946 rc = 0;
8947 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
8948 &mboxq->u.mqe.un.reg_fcfi);
8949 } else {
8950 /* We are a NVME Target mode with MRQ > 1 */
8951
8952 /* First register the FCFI */
8953 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
8954 mboxq->vport = phba->pport;
8955 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8956 if (rc != MBX_SUCCESS)
8957 goto out_unset_queue;
8958 rc = 0;
8959 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
8960 &mboxq->u.mqe.un.reg_fcfi_mrq);
8961
8962 /* Next register the MRQs */
8963 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
8964 mboxq->vport = phba->pport;
8965 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8966 if (rc != MBX_SUCCESS)
8967 goto out_unset_queue;
8968 rc = 0;
8969 }
8970 /* Check if the port is configured to be disabled */
8971 lpfc_sli_read_link_ste(phba);
8972 }
8973
8974 /* Don't post more new bufs if repost already recovered
8975 * the nvme sgls.
8976 */
8977 if (phba->nvmet_support == 0) {
8978 if (phba->sli4_hba.io_xri_cnt == 0) {
8979 len = lpfc_new_io_buf(
8980 phba, phba->sli4_hba.io_xri_max);
8981 if (len == 0) {
8982 rc = -ENOMEM;
8983 goto out_unset_queue;
8984 }
8985
8986 if (phba->cfg_xri_rebalancing)
8987 lpfc_create_multixri_pools(phba);
8988 }
8989 } else {
8990 phba->cfg_xri_rebalancing = 0;
8991 }
8992
8993 /* Allow asynchronous mailbox command to go through */
8994 spin_lock_irq(&phba->hbalock);
8995 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8996 spin_unlock_irq(&phba->hbalock);
8997
8998 /* Post receive buffers to the device */
8999 lpfc_sli4_rb_setup(phba);
9000
9001 /* Reset HBA FCF states after HBA reset */
9002 phba->fcf.fcf_flag = 0;
9003 phba->fcf.current_rec.flag = 0;
9004
9005 /* Start the ELS watchdog timer */
9006 mod_timer(&vport->els_tmofunc,
9007 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
9008
9009 /* Start heart beat timer */
9010 mod_timer(&phba->hb_tmofunc,
9011 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
9012 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
9013 phba->last_completion_time = jiffies;
9014
9015 /* start eq_delay heartbeat */
9016 if (phba->cfg_auto_imax)
9017 queue_delayed_work(phba->wq, &phba->eq_delay_work,
9018 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
9019
9020 /* start per phba idle_stat_delay heartbeat */
9021 lpfc_init_idle_stat_hb(phba);
9022
9023 /* Start error attention (ERATT) polling timer */
9024 mod_timer(&phba->eratt_poll,
9025 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
9026
9027 /*
9028 * The port is ready, set the host's link state to LINK_DOWN
9029 * in preparation for link interrupts.
9030 */
9031 spin_lock_irq(&phba->hbalock);
9032 phba->link_state = LPFC_LINK_DOWN;
9033
9034 /* Check if physical ports are trunked */
9035 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
9036 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
9037 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
9038 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
9039 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
9040 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
9041 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
9042 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
9043 spin_unlock_irq(&phba->hbalock);
9044
9045 /* Arm the CQs and then EQs on device */
9046 lpfc_sli4_arm_cqeq_intr(phba);
9047
9048 /* Indicate device interrupt mode */
9049 phba->sli4_hba.intr_enable = 1;
9050
9051 /* Setup CMF after HBA is initialized */
9052 lpfc_cmf_setup(phba);
9053
9054 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
9055 (phba->hba_flag & LINK_DISABLED)) {
9056 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9057 "3103 Adapter Link is disabled.\n");
9058 lpfc_down_link(phba, mboxq);
9059 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9060 if (rc != MBX_SUCCESS) {
9061 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9062 "3104 Adapter failed to issue "
9063 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
9064 goto out_io_buff_free;
9065 }
9066 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
9067 /* don't perform init_link on SLI4 FC port loopback test */
9068 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
9069 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
9070 if (rc)
9071 goto out_io_buff_free;
9072 }
9073 }
9074 mempool_free(mboxq, phba->mbox_mem_pool);
9075
9076 /* Enable RAS FW log support */
9077 lpfc_sli4_ras_setup(phba);
9078
9079 phba->hba_flag |= HBA_SETUP;
9080 return rc;
9081
9082 out_io_buff_free:
9083 /* Free allocated IO Buffers */
9084 lpfc_io_free(phba);
9085 out_unset_queue:
9086 /* Unset all the queues set up in this routine when error out */
9087 lpfc_sli4_queue_unset(phba);
9088 out_free_iocblist:
9089 lpfc_free_iocb_list(phba);
9090 out_destroy_queue:
9091 lpfc_sli4_queue_destroy(phba);
9092 out_stop_timers:
9093 lpfc_stop_hba_timers(phba);
9094 out_free_mbox:
9095 mempool_free(mboxq, phba->mbox_mem_pool);
9096 return rc;
9097 }
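
/* lpfc_sli4_hba_setup unwinds errors through a ladder of labels
 * (out_io_buff_free ... out_free_mbox), each releasing what was acquired
 * after the previous one. A compact stand-alone sketch of the same goto
 * unwind pattern; the three "resources" are plain heap allocations and
 * every name is invented for illustration.
 */
#if 0
#include <stdlib.h>

static int setup(void)
{
	void *mbox, *queues, *iocbs;
	int rc = -1;

	mbox = malloc(64);
	if (!mbox)
		return -1;
	queues = malloc(64);
	if (!queues)
		goto out_free_mbox;
	iocbs = malloc(64);
	if (!iocbs)
		goto out_free_queues;

	/* ...use the resources, then tear down normally... */
	free(iocbs);
	free(queues);
	free(mbox);
	return 0;

out_free_queues:
	free(queues);		/* undo in reverse order of acquisition */
out_free_mbox:
	free(mbox);
	return rc;
}

int main(void) { return setup(); }
#endif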
9098
9099 /**
9100 * lpfc_mbox_timeout - Timeout call back function for mbox timer
9101 * @t: Context to fetch pointer to hba structure from.
9102 *
9103 * This is the callback function for the mailbox timer. The mailbox
9104 * timer is armed when a new mailbox command is issued and the timer
9105 * is deleted when the mailbox completes. The function is called by
9106 * the kernel timer code when a mailbox does not complete within the
9107 * expected time. This function wakes up the worker thread to
9108 * process the mailbox timeout and returns. All the processing is
9109 * done by the worker thread function lpfc_mbox_timeout_handler.
9110 **/
9111 void
9112 lpfc_mbox_timeout(struct timer_list *t)
9113 {
9114 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
9115 unsigned long iflag;
9116 uint32_t tmo_posted;
9117
9118 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
9119 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
9120 if (!tmo_posted)
9121 phba->pport->work_port_events |= WORKER_MBOX_TMO;
9122 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
9123
9124 if (!tmo_posted)
9125 lpfc_worker_wake_up(phba);
9126 return;
9127 }
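
/* The timeout callback posts WORKER_MBOX_TMO at most once and only wakes
 * the worker on the 0 -> 1 transition of the event bit. The same idea in
 * a runnable sketch with a C11 atomic event word (the driver protects its
 * event word with work_port_lock instead).
 */
#if 0
#include <stdatomic.h>
#include <stdio.h>

#define MBOX_TMO_EVENT 0x1u

static atomic_uint work_events;

static void wake_worker(void) { puts("worker woken"); }

static void mbox_timeout(void)
{
	unsigned int prev = atomic_fetch_or(&work_events, MBOX_TMO_EVENT);

	if (!(prev & MBOX_TMO_EVENT))	/* only the first poster wakes */
		wake_worker();
}

int main(void)
{
	mbox_timeout();
	mbox_timeout();		/* already posted: no second wake-up */
	return 0;
}
#endif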
9128
9129 /**
9130 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
9131 * are pending
9132 * @phba: Pointer to HBA context object.
9133 *
9134 * This function checks if any mailbox completions are present on the mailbox
9135 * completion queue.
9136 **/
9137 static bool
9138 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
9139 {
9140
9141 uint32_t idx;
9142 struct lpfc_queue *mcq;
9143 struct lpfc_mcqe *mcqe;
9144 bool pending_completions = false;
9145 uint8_t qe_valid;
9146
9147 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
9148 return false;
9149
9150 /* Check for completions on mailbox completion queue */
9151
9152 mcq = phba->sli4_hba.mbx_cq;
9153 idx = mcq->hba_index;
9154 qe_valid = mcq->qe_valid;
9155 while (bf_get_le32(lpfc_cqe_valid,
9156 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
9157 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
9158 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
9159 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
9160 pending_completions = true;
9161 break;
9162 }
9163 idx = (idx + 1) % mcq->entry_count;
9164 if (mcq->hba_index == idx)
9165 break;
9166
9167 /* if the index wrapped around, toggle the valid bit */
9168 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
9169 qe_valid = (qe_valid) ? 0 : 1;
9170 }
9171 return pending_completions;
9172
9173 }
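
/* The scan above walks a completion ring whose producer flips a "valid"
 * (phase) bit on every wrap, so the consumer toggles its expected phase
 * when the index returns to zero. A self-contained sketch of that scan;
 * the entry layout is invented and the phase flip is unconditional here,
 * whereas the driver only does it when pc_sli4_params.cqav is set.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

struct cqe { unsigned int valid : 1; unsigned int is_async : 1; };

#define RING_SIZE 8

static bool completions_pending(const struct cqe *ring,
				unsigned int hba_index, unsigned int qe_valid)
{
	unsigned int idx = hba_index;

	while (ring[idx].valid == qe_valid) {
		if (!ring[idx].is_async)
			return true;		/* real mailbox completion */
		idx = (idx + 1) % RING_SIZE;
		if (idx == hba_index)
			break;			/* scanned the whole ring */
		if (idx == 0)
			qe_valid ^= 1;		/* wrapped: phase flips */
	}
	return false;
}

int main(void)
{
	struct cqe ring[RING_SIZE] = {
		[0] = { .valid = 1, .is_async = 1 },	/* async event */
		[1] = { .valid = 1, .is_async = 0 },	/* mbox completion */
	};

	printf("pending=%d\n", completions_pending(ring, 0, 1));
	return 0;
}
#endif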
9174
9175 /**
9176 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
9177 * that were missed.
9178 * @phba: Pointer to HBA context object.
9179 *
9180 * For sli4, it is possible to miss an interrupt. As such, mbox completions
9181 * may be missed, causing erroneous mailbox timeouts. This function
9182 * checks to see if mbox completions are on the mailbox completion queue
9183 * and will process all the completions associated with the eq for the
9184 * mailbox completion queue.
9185 **/
9186 static bool
9187 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
9188 {
9189 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
9190 uint32_t eqidx;
9191 struct lpfc_queue *fpeq = NULL;
9192 struct lpfc_queue *eq;
9193 bool mbox_pending;
9194
9195 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
9196 return false;
9197
9198 /* Find the EQ associated with the mbox CQ */
9199 if (sli4_hba->hdwq) {
9200 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
9201 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
9202 if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
9203 fpeq = eq;
9204 break;
9205 }
9206 }
9207 }
9208 if (!fpeq)
9209 return false;
9210
9211 /* Turn off interrupts from this EQ */
9212
9213 sli4_hba->sli4_eq_clr_intr(fpeq);
9214
9215 /* Check to see if a mbox completion is pending */
9216
9217 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
9218
9219 /*
9220 * If a mbox completion is pending, process all the events on EQ
9221 * associated with the mbox completion queue (this could include
9222 * mailbox commands, async events, els commands, receive queue data
9223 * and fcp commands)
9224 */
9225
9226 if (mbox_pending)
9227 /* process and rearm the EQ */
9228 lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
9229 LPFC_QUEUE_WORK);
9230 else
9231 /* Always clear and re-arm the EQ */
9232 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
9233
9234 return mbox_pending;
9235
9236 }
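
/* The recovery routine masks the EQ, peeks for a missed completion, and
 * either processes the queue or simply re-arms it, so the EQ is never left
 * unarmed. A tiny sketch of that mask / check / re-arm shape; the struct
 * and names are invented for illustration.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

struct eq { bool armed; int pending; };

static bool recover_missed(struct eq *eq)
{
	bool missed;

	eq->armed = false;		/* mask interrupts from this EQ */
	missed = eq->pending > 0;	/* peek for lost completions */
	if (missed)
		eq->pending = 0;	/* process everything queued */
	eq->armed = true;		/* always leave the EQ re-armed */
	return missed;
}

int main(void)
{
	struct eq eq = { .armed = true, .pending = 2 };

	printf("missed=%d\n", recover_missed(&eq));
	return 0;
}
#endif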
9237
9238 /**
9239 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
9240 * @phba: Pointer to HBA context object.
9241 *
9242 * This function is called from worker thread when a mailbox command times out.
9243 * The caller is not required to hold any locks. This function will reset the
9244 * HBA and recover all the pending commands.
9245 **/
9246 void
9247 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
9248 {
9249 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
9250 MAILBOX_t *mb = NULL;
9251
9252 struct lpfc_sli *psli = &phba->sli;
9253
9254 /* If the mailbox completed, process the completion */
9255 lpfc_sli4_process_missed_mbox_completions(phba);
9256
9257 if (!(psli->sli_flag & LPFC_SLI_ACTIVE))
9258 return;
9259
9260 if (pmbox != NULL)
9261 mb = &pmbox->u.mb;
9262 /* Check the pmbox pointer first. There is a race condition
9263 * between the mbox timeout handler getting executed in the
9264 * worklist and the mailbox actually completing. When this
9265 * race condition occurs, the mbox_active will be NULL.
9266 */
9267 spin_lock_irq(&phba->hbalock);
9268 if (pmbox == NULL) {
9269 lpfc_printf_log(phba, KERN_WARNING,
9270 LOG_MBOX | LOG_SLI,
9271 "0353 Active Mailbox cleared - mailbox timeout "
9272 "exiting\n");
9273 spin_unlock_irq(&phba->hbalock);
9274 return;
9275 }
9276
9277 /* Mbox cmd <mbxCommand> timeout */
9278 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9279 "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
9280 mb->mbxCommand,
9281 phba->pport->port_state,
9282 phba->sli.sli_flag,
9283 phba->sli.mbox_active);
9284 spin_unlock_irq(&phba->hbalock);
9285
9286 /* Setting state unknown so lpfc_sli_abort_iocb_ring
9287 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
9288 * it to fail all outstanding SCSI IO.
9289 */
9290 set_bit(MBX_TMO_ERR, &phba->bit_flags);
9291 spin_lock_irq(&phba->pport->work_port_lock);
9292 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
9293 spin_unlock_irq(&phba->pport->work_port_lock);
9294 spin_lock_irq(&phba->hbalock);
9295 phba->link_state = LPFC_LINK_UNKNOWN;
9296 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9297 spin_unlock_irq(&phba->hbalock);
9298
9299 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9300 "0345 Resetting board due to mailbox timeout\n");
9301
9302 /* Reset the HBA device */
9303 lpfc_reset_hba(phba);
9304 }
9305
9306 /**
9307 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
9308 * @phba: Pointer to HBA context object.
9309 * @pmbox: Pointer to mailbox object.
9310 * @flag: Flag indicating how the mailbox need to be processed.
9311 *
9312 * This function is called by discovery code and HBA management code
9313 * to submit a mailbox command to firmware with SLI-3 interface spec. This
9314 * function gets the hbalock to protect the data structures.
9315 * The mailbox command can be submitted in polling mode, in which case
9316 * this function will wait in a polling loop for the completion of the
9317 * mailbox.
9318 * If the mailbox is submitted in no_wait mode (not polling) the
9319 * function will submit the command and return immediately without waiting
9320 * for the mailbox completion. The no_wait mode is supported only when the HBA
9321 * is in SLI2/SLI3 mode - interrupts are enabled.
9322 * The SLI interface allows only one mailbox pending at a time. If the
9323 * mailbox is issued in polling mode and there is already a mailbox
9324 * pending, then the function will return an error. If the mailbox is issued
9325 * in NO_WAIT mode and there is a mailbox pending already, the function
9326 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
9327 * The sli layer owns the mailbox object until the completion of mailbox
9328 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
9329 * return codes the caller owns the mailbox command after the return of
9330 * the function.
9331 **/
9332 static int
9333 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
9334 uint32_t flag)
9335 {
9336 MAILBOX_t *mbx;
9337 struct lpfc_sli *psli = &phba->sli;
9338 uint32_t status, evtctr;
9339 uint32_t ha_copy, hc_copy;
9340 int i;
9341 unsigned long timeout;
9342 unsigned long drvr_flag = 0;
9343 uint32_t word0, ldata;
9344 void __iomem *to_slim;
9345 int processing_queue = 0;
9346
9347 spin_lock_irqsave(&phba->hbalock, drvr_flag);
9348 if (!pmbox) {
9349 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9350 /* processing mbox queue from intr_handler */
9351 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9352 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9353 return MBX_SUCCESS;
9354 }
9355 processing_queue = 1;
9356 pmbox = lpfc_mbox_get(phba);
9357 if (!pmbox) {
9358 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9359 return MBX_SUCCESS;
9360 }
9361 }
9362
9363 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
9364 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
9365 if (!pmbox->vport) {
9366 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9367 lpfc_printf_log(phba, KERN_ERR,
9368 LOG_MBOX | LOG_VPORT,
9369 "1806 Mbox x%x failed. No vport\n",
9370 pmbox->u.mb.mbxCommand);
9371 dump_stack();
9372 goto out_not_finished;
9373 }
9374 }
9375
9376 /* If the PCI channel is in offline state, do not post mbox. */
9377 if (unlikely(pci_channel_offline(phba->pcidev))) {
9378 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9379 goto out_not_finished;
9380 }
9381
9382 /* If HBA has a deferred error attention, fail the iocb. */
9383 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
9384 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9385 goto out_not_finished;
9386 }
9387
9388 psli = &phba->sli;
9389
9390 mbx = &pmbox->u.mb;
9391 status = MBX_SUCCESS;
9392
9393 if (phba->link_state == LPFC_HBA_ERROR) {
9394 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9395
9396 /* Mbox command <mbxCommand> cannot issue */
9397 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9398 "(%d):0311 Mailbox command x%x cannot "
9399 "issue Data: x%x x%x\n",
9400 pmbox->vport ? pmbox->vport->vpi : 0,
9401 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9402 goto out_not_finished;
9403 }
9404
9405 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
9406 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
9407 !(hc_copy & HC_MBINT_ENA)) {
9408 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9409 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9410 "(%d):2528 Mailbox command x%x cannot "
9411 "issue Data: x%x x%x\n",
9412 pmbox->vport ? pmbox->vport->vpi : 0,
9413 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9414 goto out_not_finished;
9415 }
9416 }
9417
9418 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9419 /* Polling for a mbox command when another one is already active
9420 * is not allowed in SLI. Also, the driver must have established
9421 * SLI2 mode to queue and process multiple mbox commands.
9422 */
9423
9424 if (flag & MBX_POLL) {
9425 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9426
9427 /* Mbox command <mbxCommand> cannot issue */
9428 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9429 "(%d):2529 Mailbox command x%x "
9430 "cannot issue Data: x%x x%x\n",
9431 pmbox->vport ? pmbox->vport->vpi : 0,
9432 pmbox->u.mb.mbxCommand,
9433 psli->sli_flag, flag);
9434 goto out_not_finished;
9435 }
9436
9437 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
9438 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9439 /* Mbox command <mbxCommand> cannot issue */
9440 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9441 "(%d):2530 Mailbox command x%x "
9442 "cannot issue Data: x%x x%x\n",
9443 pmbox->vport ? pmbox->vport->vpi : 0,
9444 pmbox->u.mb.mbxCommand,
9445 psli->sli_flag, flag);
9446 goto out_not_finished;
9447 }
9448
9449 /* Another mailbox command is still being processed, queue this
9450 * command to be processed later.
9451 */
9452 lpfc_mbox_put(phba, pmbox);
9453
9454 /* Mbox cmd issue - BUSY */
9455 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9456 "(%d):0308 Mbox cmd issue - BUSY Data: "
9457 "x%x x%x x%x x%x\n",
9458 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
9459 mbx->mbxCommand,
9460 phba->pport ? phba->pport->port_state : 0xff,
9461 psli->sli_flag, flag);
9462
9463 psli->slistat.mbox_busy++;
9464 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9465
9466 if (pmbox->vport) {
9467 lpfc_debugfs_disc_trc(pmbox->vport,
9468 LPFC_DISC_TRC_MBOX_VPORT,
9469 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
9470 (uint32_t)mbx->mbxCommand,
9471 mbx->un.varWords[0], mbx->un.varWords[1]);
9472 }
9473 else {
9474 lpfc_debugfs_disc_trc(phba->pport,
9475 LPFC_DISC_TRC_MBOX,
9476 "MBOX Bsy: cmd:x%x mb:x%x x%x",
9477 (uint32_t)mbx->mbxCommand,
9478 mbx->un.varWords[0], mbx->un.varWords[1]);
9479 }
9480
9481 return MBX_BUSY;
9482 }
9483
9484 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9485
9486 /* If we are not polling, we MUST be in SLI2 mode */
9487 if (flag != MBX_POLL) {
9488 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
9489 (mbx->mbxCommand != MBX_KILL_BOARD)) {
9490 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9491 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9492 /* Mbox command <mbxCommand> cannot issue */
9493 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9494 "(%d):2531 Mailbox command x%x "
9495 "cannot issue Data: x%x x%x\n",
9496 pmbox->vport ? pmbox->vport->vpi : 0,
9497 pmbox->u.mb.mbxCommand,
9498 psli->sli_flag, flag);
9499 goto out_not_finished;
9500 }
9501 /* timeout active mbox command */
9502 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
9503 1000);
9504 mod_timer(&psli->mbox_tmo, jiffies + timeout);
9505 }
9506
9507 /* Mailbox cmd <cmd> issue */
9508 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9509 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
9510 "x%x\n",
9511 pmbox->vport ? pmbox->vport->vpi : 0,
9512 mbx->mbxCommand,
9513 phba->pport ? phba->pport->port_state : 0xff,
9514 psli->sli_flag, flag);
9515
9516 if (mbx->mbxCommand != MBX_HEARTBEAT) {
9517 if (pmbox->vport) {
9518 lpfc_debugfs_disc_trc(pmbox->vport,
9519 LPFC_DISC_TRC_MBOX_VPORT,
9520 "MBOX Send vport: cmd:x%x mb:x%x x%x",
9521 (uint32_t)mbx->mbxCommand,
9522 mbx->un.varWords[0], mbx->un.varWords[1]);
9523 }
9524 else {
9525 lpfc_debugfs_disc_trc(phba->pport,
9526 LPFC_DISC_TRC_MBOX,
9527 "MBOX Send: cmd:x%x mb:x%x x%x",
9528 (uint32_t)mbx->mbxCommand,
9529 mbx->un.varWords[0], mbx->un.varWords[1]);
9530 }
9531 }
9532
9533 psli->slistat.mbox_cmd++;
9534 evtctr = psli->slistat.mbox_event;
9535
9536 /* next set own bit for the adapter and copy over command word */
9537 mbx->mbxOwner = OWN_CHIP;
9538
9539 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9540 /* Populate mbox extension offset word. */
9541 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
9542 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9543 = (uint8_t *)phba->mbox_ext
9544 - (uint8_t *)phba->mbox;
9545 }
9546
9547 /* Copy the mailbox extension data */
9548 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
9549 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
9550 (uint8_t *)phba->mbox_ext,
9551 pmbox->in_ext_byte_len);
9552 }
9553 /* Copy command data to host SLIM area */
9554 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
9555 } else {
9556 /* Populate mbox extension offset word. */
9557 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
9558 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9559 = MAILBOX_HBA_EXT_OFFSET;
9560
9561 /* Copy the mailbox extension data */
9562 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
9563 lpfc_memcpy_to_slim(phba->MBslimaddr +
9564 MAILBOX_HBA_EXT_OFFSET,
9565 pmbox->ctx_buf, pmbox->in_ext_byte_len);
9566
9567 if (mbx->mbxCommand == MBX_CONFIG_PORT)
9568 /* copy command data into host mbox for cmpl */
9569 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
9570 MAILBOX_CMD_SIZE);
9571
9572 /* First copy mbox command data to HBA SLIM, skip past first
9573  * word */
9574 to_slim = phba->MBslimaddr + sizeof (uint32_t);
9575 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
9576 MAILBOX_CMD_SIZE - sizeof (uint32_t));
9577
9578 /* Next copy over first word, with mbxOwner set */
9579 ldata = *((uint32_t *)mbx);
9580 to_slim = phba->MBslimaddr;
9581 writel(ldata, to_slim);
9582 readl(to_slim); /* flush */
9583
9584 if (mbx->mbxCommand == MBX_CONFIG_PORT)
9585 /* switch over to host mailbox */
9586 psli->sli_flag |= LPFC_SLI_ACTIVE;
9587 }
9588
9589 wmb();
9590
9591 switch (flag) {
9592 case MBX_NOWAIT:
9593 /* Set up reference to mailbox command */
9594 psli->mbox_active = pmbox;
9595 /* Interrupt board to do it */
9596 writel(CA_MBATT, phba->CAregaddr);
9597 readl(phba->CAregaddr); /* flush */
9598 /* Don't wait for it to finish, just return */
9599 break;
9600
9601 case MBX_POLL:
9602 /* Set up null reference to mailbox command */
9603 psli->mbox_active = NULL;
9604 /* Interrupt board to do it */
9605 writel(CA_MBATT, phba->CAregaddr);
9606 readl(phba->CAregaddr); /* flush */
9607
9608 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9609 /* First read mbox status word */
9610 word0 = *((uint32_t *)phba->mbox);
9611 word0 = le32_to_cpu(word0);
9612 } else {
9613 /* First read mbox status word */
9614 if (lpfc_readl(phba->MBslimaddr, &word0)) {
9615 spin_unlock_irqrestore(&phba->hbalock,
9616 drvr_flag);
9617 goto out_not_finished;
9618 }
9619 }
9620
9621 /* Read the HBA Host Attention Register */
9622 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9623 spin_unlock_irqrestore(&phba->hbalock,
9624 drvr_flag);
9625 goto out_not_finished;
9626 }
9627 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
9628 1000) + jiffies;
9629 i = 0;
9630 /* Wait for command to complete */
9631 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
9632 (!(ha_copy & HA_MBATT) &&
9633 (phba->link_state > LPFC_WARM_START))) {
9634 if (time_after(jiffies, timeout)) {
9635 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9636 spin_unlock_irqrestore(&phba->hbalock,
9637 drvr_flag);
9638 goto out_not_finished;
9639 }
9640
9641 /* Check if we took a mbox interrupt while we were
9642  * polling */
9643 if (((word0 & OWN_CHIP) != OWN_CHIP)
9644 && (evtctr != psli->slistat.mbox_event))
9645 break;
9646
9647 if (i++ > 10) {
9648 spin_unlock_irqrestore(&phba->hbalock,
9649 drvr_flag);
9650 msleep(1);
9651 spin_lock_irqsave(&phba->hbalock, drvr_flag);
9652 }
9653
9654 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9655 /* First copy command data */
9656 word0 = *((uint32_t *)phba->mbox);
9657 word0 = le32_to_cpu(word0);
9658 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
9659 MAILBOX_t *slimmb;
9660 uint32_t slimword0;
9661 /* Check real SLIM for any errors */
9662 slimword0 = readl(phba->MBslimaddr);
9663 slimmb = (MAILBOX_t *) & slimword0;
9664 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
9665 && slimmb->mbxStatus) {
9666 psli->sli_flag &=
9667 ~LPFC_SLI_ACTIVE;
9668 word0 = slimword0;
9669 }
9670 }
9671 } else {
9672 /* First copy command data */
9673 word0 = readl(phba->MBslimaddr);
9674 }
9675 /* Read the HBA Host Attention Register */
9676 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9677 spin_unlock_irqrestore(&phba->hbalock,
9678 drvr_flag);
9679 goto out_not_finished;
9680 }
9681 }
9682
9683 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9684 /* copy results back to user */
9685 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
9686 MAILBOX_CMD_SIZE);
9687 /* Copy the mailbox extension data */
9688 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
9689 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
9690 pmbox->ctx_buf,
9691 pmbox->out_ext_byte_len);
9692 }
9693 } else {
9694 /* First copy command data */
9695 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
9696 MAILBOX_CMD_SIZE);
9697 /* Copy the mailbox extension data */
9698 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
9699 lpfc_memcpy_from_slim(
9700 pmbox->ctx_buf,
9701 phba->MBslimaddr +
9702 MAILBOX_HBA_EXT_OFFSET,
9703 pmbox->out_ext_byte_len);
9704 }
9705 }
9706
9707 writel(HA_MBATT, phba->HAregaddr);
9708 readl(phba->HAregaddr); /* flush */
9709
9710 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9711 status = mbx->mbxStatus;
9712 }
9713
9714 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9715 return status;
9716
9717 out_not_finished:
9718 if (processing_queue) {
9719 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
9720 lpfc_mbox_cmpl_put(phba, pmbox);
9721 }
9722 return MBX_NOT_FINISHED;
9723 }
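
/* In the host-memory (SLI2-active) path above, the payload words are
 * copied first and the word holding the mbxOwner bit is made visible
 * last, with wmb() ordering the writes before the doorbell. A runnable
 * analogue using a C11 release store as the publish step; the mailbox
 * layout and names are invented for illustration.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

#define OWN_CHIP 0x1u

struct mbox {
	_Atomic uint32_t word0;		/* holds the OWN bit, written last */
	uint8_t payload[60];
};

static void post_mbox(struct mbox *mb, const uint8_t *cmd, size_t len)
{
	memcpy(mb->payload, cmd, len);		/* body first */
	atomic_store_explicit(&mb->word0, OWN_CHIP,
			      memory_order_release);	/* publish last */
	/* ...ring the chip attention doorbell here... */
}

int main(void)
{
	static struct mbox mb;
	const uint8_t cmd[4] = { 0x12, 0x34, 0x56, 0x78 };

	post_mbox(&mb, cmd, sizeof(cmd));
	return 0;
}
#endif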
9724
9725 /**
9726 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
9727 * @phba: Pointer to HBA context object.
9728 *
9729 * The function blocks the posting of SLI4 asynchronous mailbox commands from
9730 * the driver internal pending mailbox queue. It will then try to wait out the
9731 * possible outstanding mailbox command before returning.
9732 *
9733 * Returns:
9734 * 0 - the outstanding mailbox command completed; otherwise, the wait for
9735 * the outstanding mailbox command timed out.
9736 **/
9737 static int
9738 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
9739 {
9740 struct lpfc_sli *psli = &phba->sli;
9741 LPFC_MBOXQ_t *mboxq;
9742 int rc = 0;
9743 unsigned long timeout = 0;
9744 u32 sli_flag;
9745 u8 cmd, subsys, opcode;
9746
9747 /* Mark the asynchronous mailbox command posting as blocked */
9748 spin_lock_irq(&phba->hbalock);
9749 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
9750 /* Determine how long we might wait for the active mailbox
9751 * command to be gracefully completed by firmware.
9752 */
9753 if (phba->sli.mbox_active)
9754 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
9755 phba->sli.mbox_active) *
9756 1000) + jiffies;
9757 spin_unlock_irq(&phba->hbalock);
9758
9759 /* Make sure the mailbox is really active */
9760 if (timeout)
9761 lpfc_sli4_process_missed_mbox_completions(phba);
9762
9763 /* Wait for the outstanding mailbox command to complete */
9764 while (phba->sli.mbox_active) {
9765 /* Check active mailbox complete status every 2ms */
9766 msleep(2);
9767 if (time_after(jiffies, timeout)) {
9768 /* Timeout, mark the outstanding cmd not complete */
9769
9770 /* Sanity check: sli.mbox_active may have completed or been
9771 * cancelled from another context during the last 2ms sleep,
9772 * so take the hbalock to be sure before logging.
9773 */
9774 spin_lock_irq(&phba->hbalock);
9775 if (phba->sli.mbox_active) {
9776 mboxq = phba->sli.mbox_active;
9777 cmd = mboxq->u.mb.mbxCommand;
9778 subsys = lpfc_sli_config_mbox_subsys_get(phba,
9779 mboxq);
9780 opcode = lpfc_sli_config_mbox_opcode_get(phba,
9781 mboxq);
9782 sli_flag = psli->sli_flag;
9783 spin_unlock_irq(&phba->hbalock);
9784 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9785 "2352 Mailbox command x%x "
9786 "(x%x/x%x) sli_flag x%x could "
9787 "not complete\n",
9788 cmd, subsys, opcode,
9789 sli_flag);
9790 } else {
9791 spin_unlock_irq(&phba->hbalock);
9792 }
9793
9794 rc = 1;
9795 break;
9796 }
9797 }
9798
9799 /* Cannot cleanly block async mailbox commands, so fail the request */
9800 if (rc) {
9801 spin_lock_irq(&phba->hbalock);
9802 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9803 spin_unlock_irq(&phba->hbalock);
9804 }
9805 return rc;
9806 }
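
/* lpfc_sli4_async_mbox_block raises a "blocked" flag, then drains any
 * command that was already active, polling every 2ms up to a deadline and
 * undoing the block on timeout. A userspace sketch of that drain loop
 * with a mutex-protected flag; the names are invented for illustration.
 */
#if 0
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool async_blocked;
static bool mbox_active;	/* stands in for phba->sli.mbox_active */

static int block_async(unsigned int timeout_ms)
{
	struct timespec delay = { 0, 2 * 1000 * 1000 };	/* 2ms, like msleep(2) */
	unsigned int waited = 0;
	bool active;

	pthread_mutex_lock(&lock);
	async_blocked = true;
	pthread_mutex_unlock(&lock);

	for (;;) {
		pthread_mutex_lock(&lock);
		active = mbox_active;
		pthread_mutex_unlock(&lock);
		if (!active)
			return 0;		/* drained cleanly */
		if (waited >= timeout_ms)
			break;
		nanosleep(&delay, NULL);
		waited += 2;
	}

	pthread_mutex_lock(&lock);		/* timed out: undo the block */
	async_blocked = false;
	pthread_mutex_unlock(&lock);
	return 1;
}

int main(void) { return block_async(10); }
#endif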
9807
9808 /**
9809 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox commands
9810 * @phba: Pointer to HBA context object.
9811 *
9812 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
9813 * commands from the driver internal pending mailbox queue. It makes sure
9814 * that there is no outstanding mailbox command before resuming posting
9815 * asynchronous mailbox commands. If, for any reason, there is an
9816 * outstanding mailbox command, it will try to wait it out before resuming
9817 * asynchronous mailbox command posting.
9818 **/
9819 static void
9820 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
9821 {
9822 struct lpfc_sli *psli = &phba->sli;
9823
9824 spin_lock_irq(&phba->hbalock);
9825 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9826 /* Asynchronous mailbox posting is not blocked, do nothing */
9827 spin_unlock_irq(&phba->hbalock);
9828 return;
9829 }
9830
9831 /* An outstanding synchronous mailbox command is guaranteed to complete,
9832 * either successfully or by timing out, and after a timeout the
9833 * outstanding command is always removed. So just unblock posting of
9834 * async mailbox commands and resume.
9835 */
9836 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9837 spin_unlock_irq(&phba->hbalock);
9838
9839 /* wake up worker thread to post asynchronous mailbox command */
9840 lpfc_worker_wake_up(phba);
9841 }
9842
9843 /**
9844 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
9845 * @phba: Pointer to HBA context object.
9846 * @mboxq: Pointer to mailbox object.
9847 *
9848 * The function waits for the bootstrap mailbox register ready bit from
9849 * the port for twice the regular mailbox command timeout value.
9850 *
9851 * 0 - no timeout on waiting for bootstrap mailbox register ready.
9852 * MBXERR_ERROR - wait for bootstrap mailbox register timed out or port
9853 * is in an unrecoverable state.
9854 **/
9855 static int
9856 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9857 {
9858 uint32_t db_ready;
9859 unsigned long timeout;
9860 struct lpfc_register bmbx_reg;
9861 struct lpfc_register portstat_reg = {-1};
9862
9863 /* Sanity check - there is no point in waiting if the port is in an
9864 * unrecoverable state.
9865 */
9866 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
9867 LPFC_SLI_INTF_IF_TYPE_2) {
9868 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
9869 &portstat_reg.word0) ||
9870 lpfc_sli4_unrecoverable_port(&portstat_reg)) {
9871 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9872 "3858 Skipping bmbx ready because "
9873 "Port Status x%x\n",
9874 portstat_reg.word0);
9875 return MBXERR_ERROR;
9876 }
9877 }
9878
9879 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
9880 * 1000) + jiffies;
9881
9882 do {
9883 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
9884 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
9885 if (!db_ready)
9886 mdelay(2);
9887
9888 if (time_after(jiffies, timeout))
9889 return MBXERR_ERROR;
9890 } while (!db_ready);
9891
9892 return 0;
9893 }
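
/* The bootstrap-mailbox wait above is a bounded poll: read the ready bit,
 * sleep ~2ms when it is clear, and give up once the deadline passes. A
 * stand-alone analogue where an atomic variable stands in for the BMBX
 * doorbell register; all names are invented for illustration.
 */
#if 0
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

#define BMBX_RDY 0x1u

static _Atomic unsigned int bmbx_reg = BMBX_RDY;	/* preset for demo */

static int wait_bmbx_ready(unsigned int timeout_ms)
{
	struct timespec delay = { 0, 2 * 1000 * 1000 };	/* ~mdelay(2) */
	unsigned int elapsed = 0;

	for (;;) {
		if (atomic_load(&bmbx_reg) & BMBX_RDY)
			return 0;
		if (elapsed >= timeout_ms)
			return -1;			/* like MBXERR_ERROR */
		nanosleep(&delay, NULL);
		elapsed += 2;
	}
}

int main(void)
{
	printf("ready rc=%d\n", wait_bmbx_ready(100));
	return 0;
}
#endif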
9894
9895 /**
9896 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
9897 * @phba: Pointer to HBA context object.
9898 * @mboxq: Pointer to mailbox object.
9899 *
9900 * The function posts a mailbox to the port. The mailbox is expected
9901 * to be completely filled in and ready for the port to operate on it.
9902 * This routine executes a synchronous completion operation on the
9903 * mailbox by polling for its completion.
9904 *
9905 * The caller must not be holding any locks when calling this routine.
9906 *
9907 * Returns:
9908 * MBX_SUCCESS - mailbox posted successfully
9909 * Any of the MBX error values.
9910 **/
9911 static int
9912 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9913 {
9914 int rc = MBX_SUCCESS;
9915 unsigned long iflag;
9916 uint32_t mcqe_status;
9917 uint32_t mbx_cmnd;
9918 struct lpfc_sli *psli = &phba->sli;
9919 struct lpfc_mqe *mb = &mboxq->u.mqe;
9920 struct lpfc_bmbx_create *mbox_rgn;
9921 struct dma_address *dma_address;
9922
9923 /*
9924 * Only one mailbox can be active to the bootstrap mailbox region
9925 * at a time and there is no queueing provided.
9926 */
9927 spin_lock_irqsave(&phba->hbalock, iflag);
9928 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9929 spin_unlock_irqrestore(&phba->hbalock, iflag);
9930 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9931 "(%d):2532 Mailbox command x%x (x%x/x%x) "
9932 "cannot issue Data: x%x x%x\n",
9933 mboxq->vport ? mboxq->vport->vpi : 0,
9934 mboxq->u.mb.mbxCommand,
9935 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9936 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9937 psli->sli_flag, MBX_POLL);
9938 return MBXERR_ERROR;
9939 }
9940 /* The server grabs the token and owns it until release */
9941 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9942 phba->sli.mbox_active = mboxq;
9943 spin_unlock_irqrestore(&phba->hbalock, iflag);
9944
9945 /* Wait for the bootstrap mbox register to become ready */
9946 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9947 if (rc)
9948 goto exit;
9949 /*
9950 * Initialize the bootstrap memory region to avoid stale data areas
9951 * in the mailbox post. Then copy the caller's mailbox contents to
9952 * the bmbx mailbox region.
9953 */
9954 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
9955 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
9956 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
9957 sizeof(struct lpfc_mqe));
9958
9959 /* Post the high mailbox dma address to the port and wait for ready. */
9960 dma_address = &phba->sli4_hba.bmbx.dma_address;
9961 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
9962
9963 /* wait for the bootstrap mbox register ready bit after the hi-address write */
9964 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9965 if (rc)
9966 goto exit;
9967
9968 /* Post the low mailbox dma address to the port. */
9969 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
9970
9971 /* wait for the bootstrap mbox register ready bit after the low-address write */
9972 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9973 if (rc)
9974 goto exit;
9975
9976 /*
9977 * Read the CQ to ensure the mailbox has completed.
9978 * If so, update the mailbox status so that the upper layers
9979 * can complete the request normally.
9980 */
9981 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
9982 sizeof(struct lpfc_mqe));
9983 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
9984 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
9985 sizeof(struct lpfc_mcqe));
9986 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
9987 /*
9988 * When the CQE status indicates a failure and the mailbox status
9989 * indicates success then copy the CQE status into the mailbox status
9990 * (and prefix it with x4000).
9991 */
9992 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
9993 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
9994 bf_set(lpfc_mqe_status, mb,
9995 (LPFC_MBX_ERROR_RANGE | mcqe_status));
9996 rc = MBXERR_ERROR;
9997 } else
9998 lpfc_sli4_swap_str(phba, mboxq);
9999
10000 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
10001 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
10002 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
10003 " x%x x%x CQ: x%x x%x x%x x%x\n",
10004 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
10005 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10006 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10007 bf_get(lpfc_mqe_status, mb),
10008 mb->un.mb_words[0], mb->un.mb_words[1],
10009 mb->un.mb_words[2], mb->un.mb_words[3],
10010 mb->un.mb_words[4], mb->un.mb_words[5],
10011 mb->un.mb_words[6], mb->un.mb_words[7],
10012 mb->un.mb_words[8], mb->un.mb_words[9],
10013 mb->un.mb_words[10], mb->un.mb_words[11],
10014 mb->un.mb_words[12], mboxq->mcqe.word0,
10015 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
10016 mboxq->mcqe.trailer);
10017 exit:
10018 /* We are holding the token; no lock needed to release it */
10019 spin_lock_irqsave(&phba->hbalock, iflag);
10020 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10021 phba->sli.mbox_active = NULL;
10022 spin_unlock_irqrestore(&phba->hbalock, iflag);
10023 return rc;
10024 }
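
/* The bootstrap mailbox is posted by writing its 64-bit DMA address to a
 * single 32-bit doorbell in two halves, high word first, with a ready-bit
 * wait between the writes. A sketch of just the address split; the
 * doorbell write is modelled as a printf and the flag bits the hardware
 * folds into the low word are ignored here.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static void write_doorbell(uint32_t val)
{
	printf("doorbell <- 0x%08x\n", val);
}

static void post_bmbx(uint64_t dma_addr)
{
	write_doorbell((uint32_t)(dma_addr >> 32));	/* addr_hi first */
	/* ...wait for the ready bit here, as the driver does... */
	write_doorbell((uint32_t)(dma_addr & 0xffffffffu));	/* then addr_lo */
}

int main(void)
{
	post_bmbx(0x0000001234567000ULL);
	return 0;
}
#endif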
10025
10026 /**
10027 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
10028 * @phba: Pointer to HBA context object.
10029 * @mboxq: Pointer to mailbox object.
10030 * @flag: Flag indicating how the mailbox need to be processed.
10031 *
10032 * This function is called by discovery code and HBA management code to submit
10033 * a mailbox command to firmware with SLI-4 interface spec.
10034 *
10035 * For all return codes, the caller owns the mailbox command after the
10036 * function returns.
10037 **/
10038 static int
10039 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
10040 uint32_t flag)
10041 {
10042 struct lpfc_sli *psli = &phba->sli;
10043 unsigned long iflags;
10044 int rc;
10045
10046 /* Dump the mailbox command being issued, if idiag dumping is set up */
10047 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
10048
10049 rc = lpfc_mbox_dev_check(phba);
10050 if (unlikely(rc)) {
10051 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10052 "(%d):2544 Mailbox command x%x (x%x/x%x) "
10053 "cannot issue Data: x%x x%x\n",
10054 mboxq->vport ? mboxq->vport->vpi : 0,
10055 mboxq->u.mb.mbxCommand,
10056 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10057 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10058 psli->sli_flag, flag);
10059 goto out_not_finished;
10060 }
10061
10062 /* Detect polling mode and jump to a handler */
10063 if (!phba->sli4_hba.intr_enable) {
10064 if (flag == MBX_POLL)
10065 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
10066 else
10067 rc = -EIO;
10068 if (rc != MBX_SUCCESS)
10069 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
10070 "(%d):2541 Mailbox command x%x "
10071 "(x%x/x%x) failure: "
10072 "mqe_sta: x%x mcqe_sta: x%x/x%x "
10073 "Data: x%x x%x\n",
10074 mboxq->vport ? mboxq->vport->vpi : 0,
10075 mboxq->u.mb.mbxCommand,
10076 lpfc_sli_config_mbox_subsys_get(phba,
10077 mboxq),
10078 lpfc_sli_config_mbox_opcode_get(phba,
10079 mboxq),
10080 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
10081 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
10082 bf_get(lpfc_mcqe_ext_status,
10083 &mboxq->mcqe),
10084 psli->sli_flag, flag);
10085 return rc;
10086 } else if (flag == MBX_POLL) {
10087 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
10088 "(%d):2542 Try to issue mailbox command "
10089 "x%x (x%x/x%x) synchronously ahead of async "
10090 "mailbox command queue: x%x x%x\n",
10091 mboxq->vport ? mboxq->vport->vpi : 0,
10092 mboxq->u.mb.mbxCommand,
10093 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10094 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10095 psli->sli_flag, flag);
10096 /* Try to block the asynchronous mailbox posting */
10097 rc = lpfc_sli4_async_mbox_block(phba);
10098 if (!rc) {
10099 /* Successfully blocked, now issue sync mbox cmd */
10100 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
10101 if (rc != MBX_SUCCESS)
10102 lpfc_printf_log(phba, KERN_WARNING,
10103 LOG_MBOX | LOG_SLI,
10104 "(%d):2597 Sync Mailbox command "
10105 "x%x (x%x/x%x) failure: "
10106 "mqe_sta: x%x mcqe_sta: x%x/x%x "
10107 "Data: x%x x%x\n",
10108 mboxq->vport ? mboxq->vport->vpi : 0,
10109 mboxq->u.mb.mbxCommand,
10110 lpfc_sli_config_mbox_subsys_get(phba,
10111 mboxq),
10112 lpfc_sli_config_mbox_opcode_get(phba,
10113 mboxq),
10114 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
10115 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
10116 bf_get(lpfc_mcqe_ext_status,
10117 &mboxq->mcqe),
10118 psli->sli_flag, flag);
10119 /* Unblock the async mailbox posting afterward */
10120 lpfc_sli4_async_mbox_unblock(phba);
10121 }
10122 return rc;
10123 }
10124
10125 /* Now, interrupt mode asynchronous mailbox command */
10126 rc = lpfc_mbox_cmd_check(phba, mboxq);
10127 if (rc) {
10128 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10129 "(%d):2543 Mailbox command x%x (x%x/x%x) "
10130 "cannot issue Data: x%x x%x\n",
10131 mboxq->vport ? mboxq->vport->vpi : 0,
10132 mboxq->u.mb.mbxCommand,
10133 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10134 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10135 psli->sli_flag, flag);
10136 goto out_not_finished;
10137 }
10138
10139 /* Put the mailbox command to the driver internal FIFO */
10140 psli->slistat.mbox_busy++;
10141 spin_lock_irqsave(&phba->hbalock, iflags);
10142 lpfc_mbox_put(phba, mboxq);
10143 spin_unlock_irqrestore(&phba->hbalock, iflags);
10144 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
10145 "(%d):0354 Mbox cmd issue - Enqueue Data: "
10146 "x%x (x%x/x%x) x%x x%x x%x\n",
10147 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
10148 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
10149 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10150 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10151 phba->pport->port_state,
10152 psli->sli_flag, MBX_NOWAIT);
10153 /* Wake up worker thread to transport mailbox command from head */
10154 lpfc_worker_wake_up(phba);
10155
10156 return MBX_BUSY;
10157
10158 out_not_finished:
10159 return MBX_NOT_FINISHED;
10160 }
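
#if 0
/*
 * Illustrative caller sketch (not part of the driver): issuing a mailbox
 * command through this entry point.  With interrupts disabled only MBX_POLL
 * is legal; otherwise MBX_NOWAIT enqueues the command and the worker thread
 * posts it later.  The choice of READ_REV here is arbitrary.
 */
static int example_issue_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	lpfc_read_rev(phba, mbox);		/* build the command */
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;				/* MBX_BUSY: queued OK */
}
#endif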

/**
 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
 * @phba: Pointer to HBA context object.
 *
 * This function is called by the worker thread to send a mailbox command to
 * SLI4 HBA firmware.
 *
 **/
int
lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mboxq;
	int rc = MBX_SUCCESS;
	unsigned long iflags;
	struct lpfc_mqe *mqe;
	uint32_t mbx_cmnd;

	/* Check interrupt mode before posting an async mailbox command */
	if (unlikely(!phba->sli4_hba.intr_enable))
		return MBX_NOT_FINISHED;

	/* Check for mailbox command service token */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (unlikely(phba->sli.mbox_active)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0384 There is pending active mailbox cmd\n");
		return MBX_NOT_FINISHED;
	}
	/* Take the mailbox command service token */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* Get the next mailbox command from head of queue */
	mboxq = lpfc_mbox_get(phba);

	/* If no mailbox command is waiting to be posted, we're done */
	if (!mboxq) {
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_SUCCESS;
	}
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Check device readiness for posting mailbox command */
	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc))
		/* Driver clean routine will clean up pending mailbox */
		goto out_not_finished;

	/* Prepare the mbox command to be posted */
	mqe = &mboxq->u.mqe;
	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);

	/* Start timer for the mbox_tmo and log some mailbox post messages */
	mod_timer(&psli->mbox_tmo, (jiffies +
		  msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
			"x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state, psli->sli_flag);

	if (mbx_cmnd != MBX_HEARTBEAT) {
		if (mboxq->vport) {
			lpfc_debugfs_disc_trc(mboxq->vport,
					      LPFC_DISC_TRC_MBOX_VPORT,
					      "MBOX Send vport: cmd:x%x mb:x%x x%x",
					      mbx_cmnd, mqe->un.mb_words[0],
					      mqe->un.mb_words[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
					      LPFC_DISC_TRC_MBOX,
					      "MBOX Send: cmd:x%x mb:x%x x%x",
					      mbx_cmnd, mqe->un.mb_words[0],
					      mqe->un.mb_words[1]);
		}
	}
	psli->slistat.mbox_cmd++;

	/* Post the mailbox command to the port */
	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"(%d):2533 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_NOWAIT);
		goto out_not_finished;
	}

	return rc;

out_not_finished:
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->sli.mbox_active) {
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		/* Release the token */
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return MBX_NOT_FINISHED;
}

/**
 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine through
 * the API jump table function pointer from the lpfc_hba struct.
 *
 * Return codes: the caller owns the mailbox command after the return of the
 * function.
 **/
int
lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
{
	return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
}

/**
 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the mbox interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s3;
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s4;
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1420 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	return 0;
}
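
#if 0
/*
 * Illustrative attach-time wiring fragment (simplified): the device group
 * decides which SLI revision's handlers populate the jump tables.  In the
 * driver this pairing is done from lpfc_api_table_setup() in lpfc_init.c.
 */
if (lpfc_mbox_api_table_setup(phba, dev_grp) ||
    lpfc_sli_api_table_setup(phba, dev_grp))
	return -ENODEV;
#endif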

/**
 * __lpfc_sli_ringtx_put - Add an iocb to the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to address of newly added command iocb.
 *
 * This function is called with the hbalock held for SLI3 ports or
 * the ring lock held for SLI4 ports to add a command
 * iocb to the txq when the SLI layer cannot submit the command iocb
 * to the ring.
 **/
void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *piocb)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		lockdep_assert_held(&pring->ring_lock);
	else
		lockdep_assert_held(&phba->hbalock);
	/* Insert the caller's iocb in the txq tail for later processing. */
	list_add_tail(&piocb->list, &pring->txq);
}

/**
 * lpfc_sli_next_iocb - Get the next iocb in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to address of newly added command iocb.
 *
 * This function is called with the hbalock held before a new
 * iocb is submitted to the firmware. It drains the iocbs already
 * queued in the txq to the firmware before submitting new iocbs,
 * preserving submission order.
 * If there are iocbs in the txq which need to be submitted
 * to firmware, lpfc_sli_next_iocb dequeues the first element
 * of the txq and returns it.
 * If the txq is empty, the function returns the caller's iocb
 * (*piocb) and sets *piocb to NULL. The caller checks *piocb to
 * find out whether there are more commands to issue.
 **/
static struct lpfc_iocbq *
lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		   struct lpfc_iocbq **piocb)
{
	struct lpfc_iocbq *nextiocb;

	lockdep_assert_held(&phba->hbalock);

	nextiocb = lpfc_sli_ringtx_get(phba, pring);
	if (!nextiocb) {
		nextiocb = *piocb;
		*piocb = NULL;
	}

	return nextiocb;
}
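
#if 0
/*
 * Usage idiom fragment (taken from __lpfc_sli_issue_iocb_s3 below): drain
 * the txq ahead of the new iocb so that ordering is preserved; once the txq
 * is empty the caller's own iocb comes back and piocb is set to NULL.
 */
while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
	lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
#endif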

/**
 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
 * recovering from error state, if the HBA is resetting, or if the
 * LPFC_STOP_IOCB_EVENT flag is turned on, the function returns IOCB_ERROR.
 * When the link is down, this function allows only iocbs for posting buffers.
 * This function finds the next available slot in the command ring, posts the
 * command to that slot, and writes the port attention register to request
 * that the HBA start processing the new iocb. If there is no slot available
 * in the ring and flag does not have SLI_IOCB_RET_IOCB set, the new iocb is
 * added to the txq; otherwise the function returns IOCB_BUSY.
 *
 * This function is called with the hbalock held. The function returns
 * success after it successfully submits the iocb to firmware or after
 * adding it to the txq.
 **/
static int
__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_iocbq *nextiocb;
	IOCB_t *iocb;
	struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];

	lockdep_assert_held(&phba->hbalock);

	if (piocb->cmd_cmpl && (!piocb->vport) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1807 IOCB x%x failed. No vport\n",
				piocb->iocb.ulpCommand);
		dump_stack();
		return IOCB_ERROR;
	}

	/* If the PCI channel is in offline state, do not post iocbs. */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return IOCB_ERROR;

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT))
		return IOCB_ERROR;

	/*
	 * We should never get an IOCB if we are in a < LINK_DOWN state
	 */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return IOCB_ERROR;

	/*
	 * Check to see if we are blocking IOCB processing because of an
	 * outstanding event.
	 */
	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
		goto iocb_busy;

	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
		/*
		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
		 * can be issued if the link is not up.
		 */
		switch (piocb->iocb.ulpCommand) {
		case CMD_QUE_RING_BUF_CN:
		case CMD_QUE_RING_BUF64_CN:
			/*
			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
			 * completion, cmd_cmpl MUST be 0.
			 */
			if (piocb->cmd_cmpl)
				piocb->cmd_cmpl = NULL;
			fallthrough;
		case CMD_CREATE_XRI_CR:
		case CMD_CLOSE_XRI_CN:
		case CMD_CLOSE_XRI_CX:
			break;
		default:
			goto iocb_busy;
		}

	/*
	 * For FCP commands, we must be in a state where we can process link
	 * attention events.
	 */
	} else if (unlikely(pring->ringno == LPFC_FCP_RING &&
			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
		goto iocb_busy;
	}

	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	if (iocb)
		lpfc_sli_update_ring(phba, pring);
	else
		lpfc_sli_update_full_ring(phba, pring);

	if (!piocb)
		return IOCB_SUCCESS;

	goto out_busy;

 iocb_busy:
	pring->stats.iocb_cmd_delay++;

 out_busy:

	if (!(flag & SLI_IOCB_RET_IOCB)) {
		__lpfc_sli_ringtx_put(phba, pring, piocb);
		return IOCB_SUCCESS;
	}

	return IOCB_BUSY;
}

/**
 * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_fcp_io_s3 is a wrapper function that invokes the lockless
 * function to send an iocb command to an HBA with SLI-3 interface spec.
 *
 * This function takes the hbalock before invoking the lockless version.
 * The function returns success after it successfully submits the iocb to
 * firmware or after adding it to the txq.
 **/
static int
__lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
			   struct lpfc_iocbq *piocb, uint32_t flag)
{
	unsigned long iflags;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflags);
	rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return rc;
}

/**
 * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue wqe on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to
 * issue a wqe command to an HBA with SLI-4 interface spec.
 *
 * This function is a lockless version. The function returns success
 * after it successfully submits the wqe to firmware or after adding it to
 * the txq.
 **/
static int
__lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
			   struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_io_buf *lpfc_cmd = piocb->io_buf;

	lpfc_prep_embed_io(phba, lpfc_cmd);
	return lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
}

void
lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq;
	union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe;
	struct sli4_sge *sgl;

	/* 128 byte wqe support here */
	sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;

	if (phba->fcp_embed_io) {
		struct fcp_cmnd *fcp_cmnd;
		u32 *ptr;

		fcp_cmnd = lpfc_cmd->fcp_cmnd;

		/* Word 0-2 - FCP_CMND */
		wqe->generic.bde.tus.f.bdeFlags =
			BUFF_TYPE_BDE_IMMED;
		wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
		wqe->generic.bde.addrHigh = 0;
		wqe->generic.bde.addrLow = 88;	/* Word 22 */

		bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);

		/* Word 22-29 FCP CMND Payload */
		ptr = &wqe->words[22];
		memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
	} else {
		/* Word 0-2 - Inline BDE */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->generic.bde.tus.f.bdeSize = sizeof(struct fcp_cmnd);
		wqe->generic.bde.addrHigh = sgl->addr_hi;
		wqe->generic.bde.addrLow = sgl->addr_lo;

		/* Word 10 */
		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
		bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
	}

	/* add the VMID tags as per switch response */
	if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) {
		if (phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) {
			bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
			       (piocb->vmid_tag.cs_ctl_vmid));
		} else if (phba->cfg_vmid_app_header) {
			bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
			wqe->words[31] = piocb->vmid_tag.app_id;
		}
	}
}
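
#if 0
/*
 * Size sketch for the embedded path above: the FCP_CMND payload is copied
 * into WQE words 22-29, so it must fit in 8 words (32 bytes).  A
 * hypothetical compile-time check (assuming struct fcp_cmnd stays 32 bytes)
 * would be:
 */
static_assert(sizeof(struct fcp_cmnd) <= 8 * sizeof(u32));
#endif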

/**
 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
 * an iocb command to an HBA with SLI-4 interface spec.
 *
 * This function is called with the ring lock held. The function returns
 * success after it successfully submits the iocb to firmware or after adding
 * it to the txq.
 **/
static int
__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_sglq *sglq;
	union lpfc_wqe128 *wqe;
	struct lpfc_queue *wq;
	struct lpfc_sli_ring *pring;
	u32 ulp_command = get_job_cmnd(phba, piocb);

	/* Get the WQ */
	if ((piocb->cmd_flag & LPFC_IO_FCP) ||
	    (piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
		wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
	} else {
		wq = phba->sli4_hba.els_wq;
	}

	/* Get corresponding ring */
	pring = wq->pring;

	/*
	 * The WQE can be either 64 or 128 bytes,
	 */

	lockdep_assert_held(&pring->ring_lock);
	wqe = &piocb->wqe;
	if (piocb->sli4_xritag == NO_XRI) {
		if (ulp_command == CMD_ABORT_XRI_CX)
			sglq = NULL;
		else {
			sglq = __lpfc_sli_get_els_sglq(phba, piocb);
			if (!sglq) {
				if (!(flag & SLI_IOCB_RET_IOCB)) {
					__lpfc_sli_ringtx_put(phba,
							      pring,
							      piocb);
					return IOCB_SUCCESS;
				} else {
					return IOCB_BUSY;
				}
			}
		}
	} else if (piocb->cmd_flag & LPFC_IO_FCP) {
		/* These IOs already have an XRI and a mapped sgl. */
		sglq = NULL;
	} else {
		/*
		 * This is a continuation of a command (CX), so this
		 * sglq is on the active list
		 */
		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
		if (!sglq)
			return IOCB_ERROR;
	}

	if (sglq) {
		piocb->sli4_lxritag = sglq->sli4_lxritag;
		piocb->sli4_xritag = sglq->sli4_xritag;

		/* ABTS sent by initiator to CT exchange, the
		 * RX_ID field will be filled with the newly
		 * allocated responder XRI.
		 */
		if (ulp_command == CMD_XMIT_BLS_RSP64_CX &&
		    piocb->abort_bls == LPFC_ABTS_UNSOL_INT)
			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
			       piocb->sli4_xritag);

		bf_set(wqe_xri_tag, &wqe->generic.wqe_com,
		       piocb->sli4_xritag);

		if (lpfc_wqe_bpl2sgl(phba, piocb, sglq) == NO_XRI)
			return IOCB_ERROR;
	}

	if (lpfc_sli4_wq_put(wq, wqe))
		return IOCB_ERROR;

	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);

	return 0;
}

/*
 * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
 *
 * This routine wraps the actual fcp i/o function pointer from the lpfc_hba
 * struct, issuing a WQE for sli-4 or an IOCB for sli-3.
 *
 * Return codes:
 * IOCB_ERROR - Error
 * IOCB_SUCCESS - Success
 * IOCB_BUSY - Busy
 **/
int
lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
		      struct lpfc_iocbq *piocb, uint32_t flag)
{
	return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
}

/*
 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
 *
 * This routine wraps the actual lockless version of the IOCB issuing
 * function pointer from the lpfc_hba struct.
 *
 * Return codes:
 * IOCB_ERROR - Error
 * IOCB_SUCCESS - Success
 * IOCB_BUSY - Busy
 **/
int
__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
		      struct lpfc_iocbq *piocb, uint32_t flag)
{
	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
}

static void
__lpfc_sli_prep_els_req_rsp_s3(struct lpfc_iocbq *cmdiocbq,
			       struct lpfc_vport *vport,
			       struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
			       u32 elscmd, u8 tmo, u8 expect_rsp)
{
	struct lpfc_hba *phba = vport->phba;
	IOCB_t *cmd;

	cmd = &cmdiocbq->iocb;
	memset(cmd, 0, sizeof(*cmd));

	cmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.elsreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;

	if (expect_rsp) {
		cmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
		cmd->un.elsreq64.remoteID = did;	/* DID */
		cmd->ulpCommand = CMD_ELS_REQUEST64_CR;
		cmd->ulpTimeout = tmo;
	} else {
		cmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
		cmd->un.genreq64.xmit_els_remoteID = did;	/* DID */
		cmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
		cmd->ulpPU = PARM_NPIV_DID;
	}
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;

	/* If we have NPIV enabled, we want to send ELS traffic by VPI. */
	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		if (expect_rsp) {
			cmd->un.elsreq64.myID = vport->fc_myDID;

			/* For ELS_REQUEST64_CR, use the VPI by default */
			cmd->ulpContext = phba->vpi_ids[vport->vpi];
		}

		cmd->ulpCt_h = 0;
		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
		if (elscmd == ELS_CMD_ECHO)
			cmd->ulpCt_l = 0; /* context = invalid RPI */
		else
			cmd->ulpCt_l = 1; /* context = VPI */
	}
}

static void
__lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
			       struct lpfc_vport *vport,
			       struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
			       u32 elscmd, u8 tmo, u8 expect_rsp)
{
	struct lpfc_hba *phba = vport->phba;
	union lpfc_wqe128 *wqe;
	struct ulp_bde64_le *bde;
	u8 els_id;

	wqe = &cmdiocbq->wqe;
	memset(wqe, 0, sizeof(*wqe));

	/* Word 0 - 2 BDE */
	bde = (struct ulp_bde64_le *)&wqe->generic.bde;
	bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
	bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
	bde->type_size = cpu_to_le32(cmd_size);
	bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);

	if (expect_rsp) {
		bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_WQE);

		/* Transfer length */
		wqe->els_req.payload_len = cmd_size;
		wqe->els_req.max_response_payload_len = FCELSSIZE;

		/* DID */
		bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did);

		/* Word 11 - ELS_ID */
		switch (elscmd) {
		case ELS_CMD_PLOGI:
			els_id = LPFC_ELS_ID_PLOGI;
			break;
		case ELS_CMD_FLOGI:
			els_id = LPFC_ELS_ID_FLOGI;
			break;
		case ELS_CMD_LOGO:
			els_id = LPFC_ELS_ID_LOGO;
			break;
		case ELS_CMD_FDISC:
			if (!vport->fc_myDID) {
				els_id = LPFC_ELS_ID_FDISC;
				break;
			}
			fallthrough;
		default:
			els_id = LPFC_ELS_ID_DEFAULT;
			break;
		}

		bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
	} else {
		/* DID */
		bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, did);

		/* Transfer length */
		wqe->xmit_els_rsp.response_payload_len = cmd_size;

		bf_set(wqe_cmnd, &wqe->xmit_els_rsp.wqe_com,
		       CMD_XMIT_ELS_RSP64_WQE);
	}

	bf_set(wqe_tmo, &wqe->generic.wqe_com, tmo);
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, cmdiocbq->iotag);
	bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);

	/* If we have NPIV enabled, we want to send ELS traffic by VPI.
	 * For SLI4, since the driver controls VPIs we also want to include
	 * all ELS pt2pt protocol traffic as well.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
	    (vport->fc_flag & FC_PT2PT)) {
		if (expect_rsp) {
			bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID);

			/* For ELS_REQUEST64_WQE, use the VPI by default */
			bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
			       phba->vpi_ids[vport->vpi]);
		}

		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
		if (elscmd == ELS_CMD_ECHO)
			bf_set(wqe_ct, &wqe->generic.wqe_com, 0);
		else
			bf_set(wqe_ct, &wqe->generic.wqe_com, 1);
	}
}

void
lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
			  struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
			  u16 cmd_size, u32 did, u32 elscmd, u8 tmo,
			  u8 expect_rsp)
{
	phba->__lpfc_sli_prep_els_req_rsp(cmdiocbq, vport, bmp, cmd_size, did,
					  elscmd, tmo, expect_rsp);
}
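
#if 0
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): how an
 * ELS originator might use the prep routine above.  This roughly mirrors
 * lpfc_prep_els_iocb() in lpfc_els.c; the DID, size, and timeout come from
 * the caller.
 */
static int example_send_plogi(struct lpfc_vport *vport,
			      struct lpfc_iocbq *elsiocb,
			      struct lpfc_dmabuf *pcmd_bmp,
			      u16 cmd_size, u32 did, u8 tmo)
{
	struct lpfc_hba *phba = vport->phba;

	lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, pcmd_bmp, cmd_size,
				  did, ELS_CMD_PLOGI, tmo, 1 /* expect_rsp */);
	return lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
}
#endif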

static void
__lpfc_sli_prep_gen_req_s3(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
			   u16 rpi, u32 num_entry, u8 tmo)
{
	IOCB_t *cmd;

	cmd = &cmdiocbq->iocb;
	memset(cmd, 0, sizeof(*cmd));

	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize = num_entry * sizeof(struct ulp_bde64);

	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);

	cmd->ulpContext = rpi;
	cmd->ulpClass = CLASS3;
	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpOwner = OWN_CHIP;
	cmd->ulpTimeout = tmo;
}

static void
__lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
			   u16 rpi, u32 num_entry, u8 tmo)
{
	union lpfc_wqe128 *cmdwqe;
	struct ulp_bde64_le *bde, *bpl;
	u32 xmit_len = 0, total_len = 0, size, type, i;

	cmdwqe = &cmdiocbq->wqe;
	memset(cmdwqe, 0, sizeof(*cmdwqe));

	/* Calculate total_len and xmit_len */
	bpl = (struct ulp_bde64_le *)bmp->virt;
	for (i = 0; i < num_entry; i++) {
		size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
		total_len += size;
	}
	for (i = 0; i < num_entry; i++) {
		size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
		type = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_TYPE_MASK;
		if (type != ULP_BDE64_TYPE_BDE_64)
			break;
		xmit_len += size;
	}

	/* Words 0 - 2 */
	bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde;
	bde->addr_low = bpl->addr_low;
	bde->addr_high = bpl->addr_high;
	bde->type_size = cpu_to_le32(xmit_len);
	bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);

	/* Word 3 */
	cmdwqe->gen_req.request_payload_len = xmit_len;

	/* Word 5 */
	bf_set(wqe_type, &cmdwqe->gen_req.wge_ctl, FC_TYPE_CT);
	bf_set(wqe_rctl, &cmdwqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL);
	bf_set(wqe_si, &cmdwqe->gen_req.wge_ctl, 1);
	bf_set(wqe_la, &cmdwqe->gen_req.wge_ctl, 1);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &cmdwqe->gen_req.wqe_com, rpi);

	/* Word 7 */
	bf_set(wqe_tmo, &cmdwqe->gen_req.wqe_com, tmo);
	bf_set(wqe_class, &cmdwqe->gen_req.wqe_com, CLASS3);
	bf_set(wqe_cmnd, &cmdwqe->gen_req.wqe_com, CMD_GEN_REQUEST64_CR);
	bf_set(wqe_ct, &cmdwqe->gen_req.wqe_com, SLI4_CT_RPI);

	/* Word 12 */
	cmdwqe->gen_req.max_response_payload_len = total_len - xmit_len;
}

void
lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
		      struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry, u8 tmo)
{
	phba->__lpfc_sli_prep_gen_req(cmdiocbq, bmp, rpi, num_entry, tmo);
}
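
#if 0
/*
 * Illustrative sketch (hypothetical helper): preparing a CT pass-through
 * via GEN_REQUEST64.  bmp carries a BPL of num_entry BDEs describing the
 * payload; the s4 variant above derives the transmit and max-response
 * lengths from that BPL.  Roughly mirrors lpfc_gen_req() in lpfc_ct.c.
 */
static int example_send_ct(struct lpfc_hba *phba, struct lpfc_iocbq *geniocb,
			   struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry,
			   u8 tmo)
{
	lpfc_sli_prep_gen_req(phba, geniocb, bmp, rpi, num_entry, tmo);
	return lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
}
#endif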

static void
__lpfc_sli_prep_xmit_seq64_s3(struct lpfc_iocbq *cmdiocbq,
			      struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
			      u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
{
	IOCB_t *icmd;

	icmd = &cmdiocbq->iocb;
	memset(icmd, 0, sizeof(*icmd));

	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
	icmd->un.xseq64.w5.hcsw.Fctl = LA;
	if (last_seq)
		icmd->un.xseq64.w5.hcsw.Fctl |= LS;
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = rctl;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;

	switch (cr_cx_cmd) {
	case CMD_XMIT_SEQUENCE64_CR:
		icmd->ulpContext = rpi;
		icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
		break;
	case CMD_XMIT_SEQUENCE64_CX:
		icmd->ulpContext = ox_id;
		icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
		break;
	default:
		break;
	}
}

static void
__lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq,
			      struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
			      u32 full_size, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
{
	union lpfc_wqe128 *wqe;
	struct ulp_bde64 *bpl;

	wqe = &cmdiocbq->wqe;
	memset(wqe, 0, sizeof(*wqe));

	/* Words 0 - 2 */
	bpl = (struct ulp_bde64 *)bmp->virt;
	wqe->xmit_sequence.bde.addrHigh = bpl->addrHigh;
	wqe->xmit_sequence.bde.addrLow = bpl->addrLow;
	wqe->xmit_sequence.bde.tus.w = bpl->tus.w;

	/* Word 5 */
	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, last_seq);
	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, rctl);
	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_CT);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, rpi);

	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);

	/* Word 7 */
	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);

	/* Word 9 */
	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id);

	/* Word 12 */
	if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK))
		wqe->xmit_sequence.xmit_len = full_size;
	else
		wqe->xmit_sequence.xmit_len =
			wqe->xmit_sequence.bde.tus.f.bdeSize;
}

void
lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
			 struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
			 u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
{
	phba->__lpfc_sli_prep_xmit_seq64(cmdiocbq, bmp, rpi, ox_id, num_entry,
					 rctl, last_seq, cr_cx_cmd);
}

static void
__lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
			     u16 iotag, u8 ulp_class, u16 cqid, bool ia,
			     bool wqec)
{
	IOCB_t *icmd = NULL;

	icmd = &cmdiocbq->iocb;
	memset(icmd, 0, sizeof(*icmd));

	/* Word 5 */
	icmd->un.acxri.abortContextTag = ulp_context;
	icmd->un.acxri.abortIoTag = iotag;

	if (ia) {
		/* Word 7 */
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;
	} else {
		/* Word 3 */
		icmd->un.acxri.abortType = ABORT_TYPE_ABTS;

		/* Word 7 */
		icmd->ulpClass = ulp_class;
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	}

	/* Word 7 */
	icmd->ulpLe = 1;
}

static void
__lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
			     u16 iotag, u8 ulp_class, u16 cqid, bool ia,
			     bool wqec)
{
	union lpfc_wqe128 *wqe;

	wqe = &cmdiocbq->wqe;
	memset(wqe, 0, sizeof(*wqe));

	/* Word 3 */
	bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
	if (ia)
		bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
	else
		bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_WQE);

	/* Word 8 */
	wqe->abort_cmd.wqe_com.abort_tag = ulp_context;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, iotag);

	/* Word 10 */
	bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);

	/* Word 11 */
	if (wqec)
		bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, cqid);
	bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
}

void
lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
			u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
			bool ia, bool wqec)
{
	phba->__lpfc_sli_prep_abort_xri(cmdiocbq, ulp_context, iotag, ulp_class,
					cqid, ia, wqec);
}
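
#if 0
/*
 * Illustrative sketch (hypothetical helper): building an abort for an
 * outstanding exchange, roughly in the shape of
 * lpfc_sli_issue_abort_iotag().  "ia" requests a CLOSE (no ABTS on the
 * wire, e.g. the link is already down) instead of a real ABORT.
 */
static int example_abort_xri(struct lpfc_hba *phba, struct lpfc_vport *vport,
			     struct lpfc_iocbq *abtsiocbp, u16 ulp_context,
			     u16 iotag, u8 ulp_class, u16 cqid, u32 ring_no)
{
	bool ia = (phba->link_state < LPFC_LINK_UP);

	lpfc_sli_prep_abort_xri(phba, abtsiocbp, ulp_context, iotag,
				ulp_class, cqid, ia, false);
	abtsiocbp->vport = vport;
	return lpfc_sli_issue_iocb(phba, ring_no, abtsiocbp, 0);
}
#endif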

/**
 * lpfc_sli_api_table_setup - Set up sli api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the SLI interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
		phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
		phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s3;
		phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s3;
		phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s3;
		phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
		phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
		phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s4;
		phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s4;
		phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s4;
		phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1419 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	return 0;
}

/**
 * lpfc_sli4_calc_ring - Calculates which ring to use
 * @phba: Pointer to HBA context object.
 * @piocb: Pointer to command iocb.
 *
 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
 * hba_wqidx, thus we need to calculate the corresponding ring.
 * Since ABORTS must go on the same WQ as the command they are
 * aborting, we use the command's hba_wqidx.
 */
struct lpfc_sli_ring *
lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
	struct lpfc_io_buf *lpfc_cmd;

	if (piocb->cmd_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
		if (unlikely(!phba->sli4_hba.hdwq))
			return NULL;
		/*
		 * for abort iocb hba_wqidx should already
		 * be setup based on what work queue we used.
		 */
		if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
			lpfc_cmd = piocb->io_buf;
			piocb->hba_wqidx = lpfc_cmd->hdwq_no;
		}
		return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
	} else {
		if (unlikely(!phba->sli4_hba.els_wq))
			return NULL;
		piocb->hba_wqidx = 0;
		return phba->sli4_hba.els_wq->pring;
	}
}

inline void lpfc_sli4_poll_eq(struct lpfc_queue *eq)
{
	struct lpfc_hba *phba = eq->phba;

	/*
	 * Unlocking an irq is one of the entry points to check
	 * for re-schedule, but we are good for the io submission
	 * path as the midlayer does a get_cpu to glue us in. Flush
	 * out the invalidate queue so we can see the updated
	 * value for the flag.
	 */
	smp_rmb();

	if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
		/* We will not likely get the completion for the caller
		 * during this iteration, but that's fine.
		 * Future io's coming on this eq should be able to
		 * pick it up. As for the case of single io's, they
		 * will be handled through a sched from the polling timer
		 * function, which is currently triggered every 1msec.
		 */
		lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM,
				     LPFC_QUEUE_WORK);
}

/**
 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: Ring number
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * lpfc_sli_issue_iocb is a wrapper around the __lpfc_sli_issue_iocb
 * function. This function takes the appropriate lock (ring lock for SLI4,
 * hbalock for SLI3), calls __lpfc_sli_issue_iocb, and returns whatever
 * error __lpfc_sli_issue_iocb returns. This wrapper is used by
 * functions which do not hold the lock themselves.
 **/
int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
		    struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_queue *eq;
	unsigned long iflags;
	int rc;

	/* If the PCI channel is in offline state, do not post iocbs. */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return IOCB_ERROR;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_sli_prep_wqe(phba, piocb);

		eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;

		pring = lpfc_sli4_calc_ring(phba, piocb);
		if (unlikely(pring == NULL))
			return IOCB_ERROR;

		spin_lock_irqsave(&pring->ring_lock, iflags);
		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);

		lpfc_sli4_poll_eq(eq);
	} else {
		/* For now, SLI2/3 will still use hbalock */
		spin_lock_irqsave(&phba->hbalock, iflags);
		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
	return rc;
}
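
#if 0
/*
 * Locking contract sketch (fragment, drawn from the SLI4 branch above):
 * lpfc_sli_issue_iocb() is for callers that do NOT already hold the lock;
 * paths that already hold the ring lock (SLI4) or hbalock (SLI3) call the
 * lockless variant directly:
 */
spin_lock_irqsave(&pring->ring_lock, iflags);	/* SLI4 */
rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
#endif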

/**
 * lpfc_extra_ring_setup - Extra ring setup function
 * @phba: Pointer to HBA context object.
 *
 * This function is called while the driver attaches to the
 * HBA to set up the extra ring. The extra ring is used
 * only when the driver needs to support target mode functionality
 * or IP over FC functionality.
 *
 * This function is called with no lock held. SLI3 only.
 **/
static int
lpfc_extra_ring_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;

	/* Adjust cmd/rsp ring iocb entries more evenly */

	/* Take some away from the FCP ring */
	pring = &psli->sli3_ring[LPFC_FCP_RING];
	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;

	/* and give them to the extra ring */
	pring = &psli->sli3_ring[LPFC_EXTRA_RING];

	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;

	/* Setup default profile for this ring */
	pring->iotag_max = 4096;
	pring->num_mask = 1;
	pring->prt[0].profile = 0;	/* Mask 0 */
	pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
	pring->prt[0].type = phba->cfg_multi_ring_type;
	pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
	return 0;
}

static void
lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
			     struct lpfc_nodelist *ndlp)
{
	unsigned long iflags;
	struct lpfc_work_evt *evtp = &ndlp->recovery_evt;

	/* Hold a node reference for outstanding queued work */
	if (!lpfc_nlp_get(ndlp))
		return;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (!list_empty(&evtp->evt_listp)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_nlp_put(ndlp);
		return;
	}

	evtp->evt_arg1 = ndlp;
	evtp->evt = LPFC_EVT_RECOVER_PORT;
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	lpfc_worker_wake_up(phba);
}

/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to iocb object.
 *
 * The async_event handler calls this routine when it receives
 * an ASYNC_STATUS_CN event from the port. The port generates
 * this event when an Abort Sequence request to an rport fails
 * twice in succession. The abort could be originated by the
 * driver or by the port. The ABTS could have been for an ELS
 * or FCP IO. The port only generates this event when an ABTS
 * fails to complete after one retry.
 */
static void
lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
			  struct lpfc_iocbq *iocbq)
{
	struct lpfc_nodelist *ndlp = NULL;
	uint16_t rpi = 0, vpi = 0;
	struct lpfc_vport *vport = NULL;

	/* The rpi in the ulpContext is vport-sensitive. */
	vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
	rpi = iocbq->iocb.ulpContext;

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"3092 Port generated ABTS async event "
			"on vpi %d rpi %d status 0x%x\n",
			vpi, rpi, iocbq->iocb.ulpStatus);

	vport = lpfc_find_vport_by_vpid(phba, vpi);
	if (!vport)
		goto err_exit;
	ndlp = lpfc_findnode_rpi(vport, rpi);
	if (!ndlp)
		goto err_exit;

	if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
		lpfc_sli_abts_recover_port(vport, ndlp);
	return;

 err_exit:
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3095 Event Context not found, no "
			"action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
			vpi, rpi, iocbq->iocb.ulpStatus,
			iocbq->iocb.ulpContext);
}

/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
 * @phba: pointer to HBA context object.
 * @ndlp: nodelist pointer for the impacted rport.
 * @axri: pointer to the wcqe containing the failed exchange.
 *
 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
 * port. The port generates this event when an abort exchange request to an
 * rport fails twice in succession with no reply. The abort could be originated
 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
 */
void
lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
			   struct lpfc_nodelist *ndlp,
			   struct sli4_wcqe_xri_aborted *axri)
{
	uint32_t ext_status = 0;

	if (!ndlp) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3115 Node Context not found, driver "
				"ignoring abts err event\n");
		return;
	}

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"3116 Port generated FCP XRI ABORT event on "
			"vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
			ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
			bf_get(lpfc_wcqe_xa_xri, axri),
			bf_get(lpfc_wcqe_xa_status, axri),
			axri->parameter);

	/*
	 * Catch the ABTS protocol failure case. Older OCe FW releases returned
	 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
	 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
	 */
	ext_status = axri->parameter & IOERR_PARAM_MASK;
	if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
	    ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
		lpfc_sli_post_recovery_event(phba, ndlp);
}

/**
 * lpfc_sli_async_event_handler - ASYNC iocb handler function
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocbq: Pointer to iocb object.
 *
 * This function is called by the slow ring event handler
 * function when there is an ASYNC event iocb in the ring.
 * This function is called with no lock held.
 * Currently this function handles only temperature related
 * ASYNC events. The function decodes the temperature sensor
 * event message and posts events for the management applications.
 **/
static void
lpfc_sli_async_event_handler(struct lpfc_hba *phba,
			     struct lpfc_sli_ring *pring,
			     struct lpfc_iocbq *iocbq)
{
	IOCB_t *icmd;
	uint16_t evt_code;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;
	uint32_t *iocb_w;

	icmd = &iocbq->iocb;
	evt_code = icmd->un.asyncstat.evt_code;

	switch (evt_code) {
	case ASYNC_TEMP_WARN:
	case ASYNC_TEMP_SAFE:
		temp_event_data.data = (uint32_t) icmd->ulpContext;
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		if (evt_code == ASYNC_TEMP_WARN) {
			temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0347 Adapter is very hot, please take "
					"corrective action. temperature : %d Celsius\n",
					(uint32_t) icmd->ulpContext);
		} else {
			temp_event_data.event_code = LPFC_NORMAL_TEMP;
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0340 Adapter temperature is OK now. "
					"temperature : %d Celsius\n",
					(uint32_t) icmd->ulpContext);
		}

		/* Send temperature change event to applications */
		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  LPFC_NL_VENDOR_ID);
		break;
	case ASYNC_STATUS_CN:
		lpfc_sli_abts_err_handler(phba, iocbq);
		break;
	default:
		iocb_w = (uint32_t *) icmd;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0346 Ring %d handler: unexpected ASYNC_STATUS"
				" evt_code 0x%x\n"
				"W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
				"W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
				"W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
				"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
				pring->ringno, icmd->un.asyncstat.evt_code,
				iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
				iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
				iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
				iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);

		break;
	}
}
11554
11555
11556 /**
11557 * lpfc_sli4_setup - SLI ring setup function
11558 * @phba: Pointer to HBA context object.
11559 *
11560 * lpfc_sli_setup sets up rings of the SLI interface with
11561 * number of iocbs per ring and iotags. This function is
11562 * called while driver attach to the HBA and before the
11563 * interrupts are enabled. So there is no need for locking.
11564 *
11565 * This function always returns 0.
11566 **/
int
lpfc_sli4_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;

	pring = phba->sli4_hba.els_wq->pring;
	pring->num_mask = LPFC_MAX_RING_MASK;
	pring->prt[0].profile = 0;	/* Mask 0 */
	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
	pring->prt[0].type = FC_TYPE_ELS;
	pring->prt[0].lpfc_sli_rcv_unsol_event =
	    lpfc_els_unsol_event;
	pring->prt[1].profile = 0;	/* Mask 1 */
	pring->prt[1].rctl = FC_RCTL_ELS_REP;
	pring->prt[1].type = FC_TYPE_ELS;
	pring->prt[1].lpfc_sli_rcv_unsol_event =
	    lpfc_els_unsol_event;
	pring->prt[2].profile = 0;	/* Mask 2 */
	/* NameServer Inquiry */
	pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
	/* NameServer */
	pring->prt[2].type = FC_TYPE_CT;
	pring->prt[2].lpfc_sli_rcv_unsol_event =
	    lpfc_ct_unsol_event;
	pring->prt[3].profile = 0;	/* Mask 3 */
	/* NameServer response */
	pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
	/* NameServer */
	pring->prt[3].type = FC_TYPE_CT;
	pring->prt[3].lpfc_sli_rcv_unsol_event =
	    lpfc_ct_unsol_event;
	return 0;
}
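
/*
 * Editor's note (illustrative sketch, not part of the driver): the prt[]
 * masks configured above are consumed by the unsolicited-frame path, which
 * conceptually matches an incoming frame's R_CTL/TYPE pair against each mask
 * and dispatches to the registered handler. A minimal sketch of that matching
 * loop, assuming a received frame header fh and a saved iocbq saveq:
 *
 *	int i;
 *
 *	for (i = 0; i < pring->num_mask; i++) {
 *		if (pring->prt[i].rctl == fh->fh_r_ctl &&
 *		    pring->prt[i].type == fh->fh_type) {
 *			pring->prt[i].lpfc_sli_rcv_unsol_event(phba, pring,
 *							       saveq);
 *			break;
 *		}
 *	}
 */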

/**
 * lpfc_sli_setup - SLI ring setup function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_setup sets up rings of the SLI interface with
 * the number of iocbs per ring and iotags. This function is
 * called while the driver attaches to the HBA and before the
 * interrupts are enabled, so there is no need for locking.
 *
 * This function always returns 0. SLI3 only.
 **/
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
	int i, totiocbsize = 0;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
	psli->sli_flag = 0;

	psli->iocbq_lookup = NULL;
	psli->iocbq_lookup_len = 0;
	psli->last_iotag = 0;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->sli3_ring[i];
		switch (i) {
		case LPFC_FCP_RING:	/* ring 0 - FCP */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
			pring->sli.sli3.numCiocb +=
				SLI2_IOCB_CMD_R1XTRA_ENTRIES;
			pring->sli.sli3.numRiocb +=
				SLI2_IOCB_RSP_R1XTRA_ENTRIES;
			pring->sli.sli3.numCiocb +=
				SLI2_IOCB_CMD_R3XTRA_ENTRIES;
			pring->sli.sli3.numRiocb +=
				SLI2_IOCB_RSP_R3XTRA_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_ctr = 0;
			pring->iotag_max =
			    (phba->cfg_hba_queue_depth * 2);
			pring->fast_iotag = pring->iotag_max;
			pring->num_mask = 0;
			break;
		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_max = phba->cfg_hba_queue_depth;
			pring->num_mask = 0;
			break;
		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->fast_iotag = 0;
			pring->iotag_ctr = 0;
			pring->iotag_max = 4096;
			pring->lpfc_sli_rcv_async_status =
				lpfc_sli_async_event_handler;
			pring->num_mask = LPFC_MAX_RING_MASK;
			pring->prt[0].profile = 0;	/* Mask 0 */
			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
			pring->prt[0].type = FC_TYPE_ELS;
			pring->prt[0].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[1].profile = 0;	/* Mask 1 */
			pring->prt[1].rctl = FC_RCTL_ELS_REP;
			pring->prt[1].type = FC_TYPE_ELS;
			pring->prt[1].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[2].profile = 0;	/* Mask 2 */
			/* NameServer Inquiry */
			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
			/* NameServer */
			pring->prt[2].type = FC_TYPE_CT;
			pring->prt[2].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			pring->prt[3].profile = 0;	/* Mask 3 */
			/* NameServer response */
			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
			/* NameServer */
			pring->prt[3].type = FC_TYPE_CT;
			pring->prt[3].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			break;
		}
		totiocbsize += (pring->sli.sli3.numCiocb *
				pring->sli.sli3.sizeCiocb) +
			(pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
	}
	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
		/* Too many cmd / rsp ring entries in SLI2 SLIM */
		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
		       "SLI2 SLIM Data: x%x x%lx\n",
		       phba->brd_no, totiocbsize,
		       (unsigned long) MAX_SLIM_IOCB_SIZE);
	}
	if (phba->cfg_multi_ring_support == 2)
		lpfc_extra_ring_setup(phba);

	return 0;
}

/**
 * lpfc_sli4_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
 * ring. This function also initializes ring indices of each ring.
 * This function is called during the initialization of the SLI
 * interface of an HBA.
 * This function is called with no lock held and returns nothing.
 **/
void
lpfc_sli4_queue_init(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i;

	psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	INIT_LIST_HEAD(&psli->mboxq);
	INIT_LIST_HEAD(&psli->mboxq_cmpl);
	/* Initialize list headers for txq and txcmplq as double linked lists */
	for (i = 0; i < phba->cfg_hdw_queue; i++) {
		pring = phba->sli4_hba.hdwq[i].io_wq->pring;
		pring->flag = 0;
		pring->ringno = LPFC_FCP_RING;
		pring->txcmplq_cnt = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		spin_lock_init(&pring->ring_lock);
	}
	pring = phba->sli4_hba.els_wq->pring;
	pring->flag = 0;
	pring->ringno = LPFC_ELS_RING;
	pring->txcmplq_cnt = 0;
	INIT_LIST_HEAD(&pring->txq);
	INIT_LIST_HEAD(&pring->txcmplq);
	INIT_LIST_HEAD(&pring->iocb_continueq);
	spin_lock_init(&pring->ring_lock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		pring = phba->sli4_hba.nvmels_wq->pring;
		pring->flag = 0;
		pring->ringno = LPFC_ELS_RING;
		pring->txcmplq_cnt = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		spin_lock_init(&pring->ring_lock);
	}

	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
 * ring. This function also initializes ring indices of each ring.
 * This function is called during the initialization of the SLI
 * interface of an HBA.
 * This function is called with no lock held and returns nothing.
 **/
void
lpfc_sli_queue_init(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i;

	psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	INIT_LIST_HEAD(&psli->mboxq);
	INIT_LIST_HEAD(&psli->mboxq_cmpl);
	/* Initialize list headers for txq and txcmplq as double linked lists */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->sli3_ring[i];
		pring->ringno = i;
		pring->sli.sli3.next_cmdidx = 0;
		pring->sli.sli3.local_getidx = 0;
		pring->sli.sli3.cmdidx = 0;
		INIT_LIST_HEAD(&pring->iocb_continueq);
		INIT_LIST_HEAD(&pring->iocb_continue_saveq);
		INIT_LIST_HEAD(&pring->postbufq);
		pring->flag = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		spin_lock_init(&pring->ring_lock);
	}
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
 * @phba: Pointer to HBA context object.
 *
 * This routine flushes the mailbox command subsystem. It will unconditionally
 * flush all the mailbox commands in the three possible stages in the mailbox
 * command sub-system: the pending mailbox command queue, the outstanding
 * mailbox command, and the completed mailbox command queue. It is the
 * caller's responsibility to make sure that the driver is in the proper state
 * to flush the mailbox command sub-system. Namely, the posting of mailbox
 * commands into the pending mailbox command queue from the various clients
 * must be stopped; either the HBA is in a state that it will never work on
 * the outstanding mailbox command (such as in EEH or ERATT conditions) or
 * the outstanding mailbox command has been completed.
 **/
static void
lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	unsigned long iflag;

	/* Disable softirqs, including timers from obtaining phba->hbalock */
	local_bh_disable();

	/* Flush all the mailbox commands in the mbox system */
	spin_lock_irqsave(&phba->hbalock, iflag);

	/* The pending mailbox command queue */
	list_splice_init(&phba->sli.mboxq, &completions);
	/* The outstanding active mailbox command */
	if (psli->mbox_active) {
		list_add_tail(&psli->mbox_active->list, &completions);
		psli->mbox_active = NULL;
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	}
	/* The completed mailbox command queue */
	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Enable softirqs again, done with phba->hbalock */
	local_bh_enable();

	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
	while (!list_empty(&completions)) {
		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	}
}
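
/*
 * Editor's note (illustrative sketch, not part of the driver): a mailbox
 * completion handler passed in mbox_cmpl must tolerate the MBX_NOT_FINISHED
 * status that the flush above hands back, since the command never reached
 * the firmware. A minimal hypothetical handler:
 *
 *	static void example_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 *	{
 *		if (pmb->u.mb.mbxStatus == MBX_NOT_FINISHED) {
 *			... flushed, not executed: only release resources ...
 *			mempool_free(pmb, phba->mbox_mem_pool);
 *			return;
 *		}
 *		... normal completion processing ...
 *		mempool_free(pmb, phba->mbox_mem_pool);
 *	}
 */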

/**
 * lpfc_sli_host_down - Vport cleanup function
 * @vport: Pointer to virtual port object.
 *
 * lpfc_sli_host_down is called to clean up the resources
 * associated with a vport before destroying virtual
 * port data structures.
 * This function performs the following operations:
 * - Free discovery resources associated with this virtual
 *   port.
 * - Free iocbs associated with this virtual port in
 *   the txq.
 * - Send abort for all iocb commands associated with this
 *   vport in txcmplq.
 *
 * This function is called with no lock held and always returns 1.
 **/
int
lpfc_sli_host_down(struct lpfc_vport *vport)
{
	LIST_HEAD(completions);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	int i;
	unsigned long flags = 0;
	uint16_t prev_pring_flag;

	lpfc_cleanup_discovery_resources(vport);

	spin_lock_irqsave(&phba->hbalock, flags);

	/*
	 * Error everything on the txq since these iocbs
	 * have not been given to the FW yet.
	 * Also issue ABTS for everything on the txcmplq
	 */
	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			prev_pring_flag = pring->flag;
			/* Only slow rings */
			if (pring->ringno == LPFC_ELS_RING) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txq, list) {
				if (iocb->vport != vport)
					continue;
				list_move_tail(&iocb->list, &completions);
			}
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txcmplq, list) {
				if (iocb->vport != vport)
					continue;
				lpfc_sli_issue_abort_iotag(phba, pring, iocb,
							   NULL);
			}
			pring->flag = prev_pring_flag;
		}
	} else {
		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
			pring = qp->pring;
			if (!pring)
				continue;
			if (pring == phba->sli4_hba.els_wq->pring) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
			prev_pring_flag = pring->flag;
			spin_lock(&pring->ring_lock);
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txq, list) {
				if (iocb->vport != vport)
					continue;
				list_move_tail(&iocb->list, &completions);
			}
			spin_unlock(&pring->ring_lock);
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txcmplq, list) {
				if (iocb->vport != vport)
					continue;
				lpfc_sli_issue_abort_iotag(phba, pring, iocb,
							   NULL);
			}
			pring->flag = prev_pring_flag;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Make sure HBA is alive */
	lpfc_issue_hb_tmo(phba);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);
	return 1;
}

/**
 * lpfc_sli_hba_down - Resource cleanup function for the HBA
 * @phba: Pointer to HBA context object.
 *
 * This function cleans up all iocbs, buffers, and mailbox commands
 * while shutting down the HBA. This function is called with no
 * lock held and always returns 1.
 * This function does the following to clean up driver resources:
 * - Free discovery resources for each virtual port
 * - Cleanup any pending fabric iocbs
 * - Iterate through the iocb txq and free each entry
 *   in the list.
 * - Free up any buffer posted to the HBA
 * - Free mailbox commands in the mailbox queue.
 **/
int
lpfc_sli_hba_down(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *buf_ptr;
	unsigned long flags = 0;
	int i;

	/* Shutdown the mailbox command sub-system */
	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);

	lpfc_hba_down_prep(phba);

	/* Disable softirqs, including timers from obtaining phba->hbalock */
	local_bh_disable();

	lpfc_fabric_abort_hba(phba);

	spin_lock_irqsave(&phba->hbalock, flags);

	/*
	 * Error everything on the txq since these iocbs
	 * have not been given to the FW yet.
	 */
	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			/* Only slow rings */
			if (pring->ringno == LPFC_ELS_RING) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
			list_splice_init(&pring->txq, &completions);
		}
	} else {
		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
			pring = qp->pring;
			if (!pring)
				continue;
			spin_lock(&pring->ring_lock);
			list_splice_init(&pring->txq, &completions);
			spin_unlock(&pring->ring_lock);
			if (pring == phba->sli4_hba.els_wq->pring) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);

	spin_lock_irqsave(&phba->hbalock, flags);
	list_splice_init(&phba->elsbuf, &completions);
	phba->elsbuf_cnt = 0;
	phba->elsbuf_prev_cnt = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	while (!list_empty(&completions)) {
		list_remove_head(&completions, buf_ptr,
				 struct lpfc_dmabuf, list);
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
	}

	/* Enable softirqs again, done with phba->hbalock */
	local_bh_enable();

	/* Return any active mbox cmds */
	del_timer_sync(&psli->mbox_tmo);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	return 1;
}

/**
 * lpfc_sli_pcimem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy; the copy proceeds in 32-bit words.
 *
 * This function is used for copying data between driver memory
 * and the SLI memory. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
void
lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src;
		ldata = le32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}


/**
 * lpfc_sli_bemem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy; the copy proceeds in 32-bit words.
 *
 * This function is used for copying data from a data structure
 * with big endian representation into local endianness.
 * This function can be called with or without lock.
 **/
void
lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src;
		ldata = be32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}
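
/*
 * Editor's note (illustrative sketch, not part of the driver): @cnt in both
 * copy helpers above is a byte count that the loops walk in 32-bit strides,
 * so callers typically pass sizeof() of the structure being converted. A
 * hypothetical call converting a little-endian SLI mailbox image into host
 * byte order:
 *
 *	MAILBOX_t host_mb;
 *
 *	lpfc_sli_pcimem_bcopy(phba->mbox, &host_mb, sizeof(host_mb));
 *
 * lpfc_sli_bemem_bcopy() is used the same way when the source data is big
 * endian rather than little endian.
 */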

/**
 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mp: Pointer to driver buffer object.
 *
 * This function is called with no lock held.
 * It always returns zero after adding the buffer to the postbufq
 * buffer list.
 **/
int
lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_dmabuf *mp)
{
	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
	   later */
	spin_lock_irq(&phba->hbalock);
	list_add_tail(&mp->list, &pring->postbufq);
	pring->postbufq_cnt++;
	spin_unlock_irq(&phba->hbalock);
	return 0;
}

/**
 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
 * @phba: Pointer to HBA context object.
 *
 * When HBQ is enabled, buffers are searched based on tags. This function
 * allocates a tag for a buffer posted using the CMD_QUE_XRI64_CX iocb. The
 * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
 * does not conflict with tags of buffers posted for unsolicited events.
 * The function returns the allocated tag. The function is called with
 * no locks held.
 **/
uint32_t
lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->buffer_tag_count++;
	/*
	 * Always set the QUE_BUFTAG_BIT to distinguish this tag from
	 * a tag assigned by HBQ.
	 */
	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
	spin_unlock_irq(&phba->hbalock);
	return phba->buffer_tag_count;
}

/**
 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: Buffer tag.
 *
 * Buffers posted using the CMD_QUE_XRI64_CX iocb are on the pring->postbufq
 * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
 * iocb is posted to the response ring with the tag of the buffer.
 * This function searches the pring->postbufq list using the tag
 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
 * iocb. If the buffer is found, the lpfc_dmabuf object of the
 * buffer is returned to the caller; otherwise NULL is returned.
 * This function is called with no lock held.
 **/
struct lpfc_dmabuf *
lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    uint32_t tag)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on tag */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->buffer_tag == tag) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0402 Cannot find virtual addr for buffer tag on "
			"ring %d Data x%lx x%px x%px x%x\n",
			pring->ringno, (unsigned long) tag,
			slp->next, slp->prev, pring->postbufq_cnt);

	return NULL;
}
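
/*
 * Editor's note (illustrative sketch, not part of the driver): the three
 * helpers above cooperate as allocate-tag / post / look-up. A hypothetical
 * caller posting a buffer mp for a CMD_QUE_XRI64_CX iocb and later
 * recovering it from the tag carried in the response might do:
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	... the iocb completes with the same tag in the response ...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, rsp_tag);
 *	if (!mp)
 *		... tag not found; the error was already logged ...
 */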

/**
 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @phys: DMA address of the buffer.
 *
 * This function searches the buffer list using the dma_address
 * of an unsolicited event to find the driver's lpfc_dmabuf object
 * corresponding to the dma_address. The function returns the
 * lpfc_dmabuf object if a buffer is found; otherwise it returns NULL.
 * This function is called by the ct and els unsolicited event
 * handlers to get the buffer associated with the unsolicited
 * event.
 *
 * This function is called with no lock held.
 **/
struct lpfc_dmabuf *
lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 dma_addr_t phys)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on phys */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->phys == phys) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0410 Cannot find virtual addr for mapped buf on "
			"ring %d Data x%llx x%px x%px x%x\n",
			pring->ringno, (unsigned long long)phys,
			slp->next, slp->prev, pring->postbufq_cnt);
	return NULL;
}

/**
 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * This function is the completion handler for the abort iocbs for
 * ELS commands. This function is called from the ELS ring event
 * handler with no lock held. This function frees memory resources
 * associated with the abort iocb.
 **/
static void
lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);
	u8 cmnd = get_job_cmnd(phba, cmdiocb);

	if (ulp_status) {
		/*
		 * Assume that the port already completed and returned, or
		 * will return the iocb. Just log the message.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (cmnd == CMD_ABORT_XRI_CX &&
			    ulp_status == IOSTAT_LOCAL_REJECT &&
			    ulp_word4 == IOERR_ABORT_REQUESTED) {
				goto release_iocb;
			}
		}

		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
				"0327 Cannot abort els iocb x%px "
				"with io cmd xri %x abort tag : x%x, "
				"abort status %x abort code %x\n",
				cmdiocb, get_job_abtsiotag(phba, cmdiocb),
				(phba->sli_rev == LPFC_SLI_REV4) ?
				get_wqe_reqtag(cmdiocb) :
				cmdiocb->iocb.un.acxri.abortContextTag,
				ulp_status, ulp_word4);

	}
release_iocb:
	lpfc_sli_release_iocbq(phba, cmdiocb);
	return;
}

/**
 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for ELS commands
 * which are aborted. The function frees memory resources used for
 * the aborted ELS commands.
 **/
void
lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		     struct lpfc_iocbq *rspiocb)
{
	struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
	IOCB_t *irsp;
	LPFC_MBOXQ_t *mbox;
	u32 ulp_command, ulp_status, ulp_word4, iotag;

	ulp_command = get_job_cmnd(phba, cmdiocb);
	ulp_status = get_job_ulpstatus(phba, rspiocb);
	ulp_word4 = get_job_word4(phba, rspiocb);

	if (phba->sli_rev == LPFC_SLI_REV4) {
		iotag = get_wqe_reqtag(cmdiocb);
	} else {
		irsp = &rspiocb->iocb;
		iotag = irsp->ulpIoTag;

		/* It is possible a PLOGI_RJT for NPIV ports to get aborted.
		 * The MBX_REG_LOGIN64 mbox command is freed back to the
		 * mbox_mem_pool here.
		 */
		if (cmdiocb->context_un.mbox) {
			mbox = cmdiocb->context_un.mbox;
			lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
			cmdiocb->context_un.mbox = NULL;
		}
	}

	/* ELS cmd tag <ulpIoTag> completes */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0139 Ignoring ELS cmd code x%x completion Data: "
			"x%x x%x x%x x%px\n",
			ulp_command, ulp_status, ulp_word4, iotag,
			cmdiocb->ndlp);
	/*
	 * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
	 * if exchange is busy.
	 */
	if (ulp_command == CMD_GEN_REQUEST64_CR)
		lpfc_ct_free_iocb(phba, cmdiocb);
	else
		lpfc_els_free_iocb(phba, cmdiocb);

	lpfc_nlp_put(ndlp);
}

/**
 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @cmpl: completion function.
 *
 * This function issues an abort iocb for the provided command iocb. In case
 * of unloading, the abort iocb will not be issued for commands on the ELS
 * ring. Instead, the completion callback of those commands is replaced so
 * that nothing happens when they finish. This function is called with
 * hbalock held and no ring_lock held (SLI4). The function returns
 * IOCB_ABORTING when the command iocb is itself an abort request or is
 * already being aborted.
 *
 **/
int
lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *cmdiocb, void *cmpl)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_iocbq *abtsiocbp;
	int retval = IOCB_ERROR;
	unsigned long iflags;
	struct lpfc_nodelist *ndlp = NULL;
	u32 ulp_command = get_job_cmnd(phba, cmdiocb);
	u16 ulp_context, iotag;
	bool ia;

	/*
	 * There are certain command types we don't want to abort. And we
	 * don't want to abort commands that are already in the process of
	 * being aborted.
	 */
	if (ulp_command == CMD_ABORT_XRI_WQE ||
	    ulp_command == CMD_ABORT_XRI_CN ||
	    ulp_command == CMD_CLOSE_XRI_CN ||
	    cmdiocb->cmd_flag & LPFC_DRIVER_ABORTED)
		return IOCB_ABORTING;

	if (!pring) {
		if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
			cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
		else
			cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
		return retval;
	}

	/*
	 * If we're unloading, don't abort iocb on the ELS ring, but change
	 * the callback so that nothing happens when it finishes.
	 */
	if ((vport->load_flag & FC_UNLOADING) &&
	    pring->ringno == LPFC_ELS_RING) {
		if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
			cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
		else
			cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
		return retval;
	}

	/* issue ABTS for this IOCB based on iotag */
	abtsiocbp = __lpfc_sli_get_iocbq(phba);
	if (abtsiocbp == NULL)
		return IOCB_NORESOURCE;

	/* This signals the response to set the correct status
	 * before calling the completion handler
	 */
	cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		ulp_context = cmdiocb->sli4_xritag;
		iotag = abtsiocbp->iotag;
	} else {
		iotag = cmdiocb->iocb.ulpIoTag;
		if (pring->ringno == LPFC_ELS_RING) {
			ndlp = cmdiocb->ndlp;
			ulp_context = ndlp->nlp_rpi;
		} else {
			ulp_context = cmdiocb->iocb.ulpContext;
		}
	}

	if (phba->link_state < LPFC_LINK_UP ||
	    (phba->sli_rev == LPFC_SLI_REV4 &&
	     phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN) ||
	    (phba->link_flag & LS_EXTERNAL_LOOPBACK))
		ia = true;
	else
		ia = false;

	lpfc_sli_prep_abort_xri(phba, abtsiocbp, ulp_context, iotag,
				cmdiocb->iocb.ulpClass,
				LPFC_WQE_CQ_ID_DEFAULT, ia, false);

	abtsiocbp->vport = vport;

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
	if (cmdiocb->cmd_flag & LPFC_IO_FCP)
		abtsiocbp->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX);

	if (cmdiocb->cmd_flag & LPFC_IO_FOF)
		abtsiocbp->cmd_flag |= LPFC_IO_FOF;

	if (cmpl)
		abtsiocbp->cmd_cmpl = cmpl;
	else
		abtsiocbp->cmd_cmpl = lpfc_sli_abort_els_cmpl;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
		if (unlikely(pring == NULL))
			goto abort_iotag_exit;
		/* Note: both hbalock and ring_lock need to be set here */
		spin_lock_irqsave(&pring->ring_lock, iflags);
		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
					       abtsiocbp, 0);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
	} else {
		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
					       abtsiocbp, 0);
	}

abort_iotag_exit:

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "0339 Abort IO XRI x%x, Original iotag x%x, "
			 "abort tag x%x Cmdjob : x%px Abortjob : x%px "
			 "retval x%x\n",
			 ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ?
			 cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp,
			 retval);
	if (retval) {
		cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
		__lpfc_sli_release_iocbq(phba, abtsiocbp);
	}

	/*
	 * Caller to this routine should check for IOCB_ERROR
	 * and handle it properly. This routine no longer removes
	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
	 */
	return retval;
}
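
/*
 * Editor's note (illustrative sketch, not part of the driver): callers hold
 * the hbalock and may pass a NULL @cmpl to fall back to the default
 * lpfc_sli_abort_els_cmpl completion, as lpfc_sli_host_down does above. A
 * hypothetical abort of one outstanding iocb on a ring:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	ret = lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (ret != IOCB_SUCCESS && ret != IOCB_ABORTING)
 *		... the ABTS was not issued; handle or retry ...
 */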

/**
 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will abort all pending and outstanding iocbs to an HBA.
 **/
void
lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_queue *qp = NULL;
	int i;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
}

/**
 * lpfc_sli_validate_fcp_iocb_for_abort - filter iocbs appropriate for FCP aborts
 * @iocbq: Pointer to iocb object.
 * @vport: Pointer to driver virtual port object.
 *
 * This function acts as an iocb filter for functions which abort FCP iocbs.
 *
 * Return values
 * -ENODEV, if a NULL iocb or vport ptr is encountered
 * -EINVAL, if the iocb is not an FCP I/O, is not on the TX cmpl queue, is
 *  already marked as driver-aborted, or is an abort iocb itself
 * 0, passes criteria for aborting the FCP I/O iocb
 **/
static int
lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq,
				     struct lpfc_vport *vport)
{
	u8 ulp_command;

	/* No null ptr vports */
	if (!iocbq || iocbq->vport != vport)
		return -ENODEV;

	/* iocb must be for FCP IO, already exists on the TX cmpl queue,
	 * can't be premarked as driver aborted, nor be an ABORT iocb itself
	 */
	ulp_command = get_job_cmnd(vport->phba, iocbq);
	if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
	    !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ) ||
	    (iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
	    (ulp_command == CMD_ABORT_XRI_CN ||
	     ulp_command == CMD_CLOSE_XRI_CN ||
	     ulp_command == CMD_ABORT_XRI_WQE))
		return -EINVAL;

	return 0;
}

/**
 * lpfc_sli_validate_fcp_iocb - validate commands associated with a SCSI target
 * @iocbq: Pointer to driver iocb object.
 * @vport: Pointer to driver virtual port object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
 *
 * This function acts as an iocb filter for validating a lun/SCSI target/SCSI
 * host.
 *
 * It will return
 * 0 if the filtering criteria are met for the given iocb and
 * 1 if the filtering criteria are not met.
 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
 * given iocb is for the SCSI device specified by the vport, tgt_id and
 * lun_id parameters.
 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
 * given iocb is for the SCSI target specified by the vport and tgt_id
 * parameters.
 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
 * given iocb is for the SCSI host associated with the given vport.
 * This function is called with no locks held.
 **/
static int
lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
			   uint16_t tgt_id, uint64_t lun_id,
			   lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_io_buf *lpfc_cmd;
	int rc = 1;

	lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);

	if (lpfc_cmd->pCmd == NULL)
		return rc;

	switch (ctx_cmd) {
	case LPFC_CTX_LUN:
		if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
		    (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
			rc = 0;
		break;
	case LPFC_CTX_TGT:
		if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
			rc = 0;
		break;
	case LPFC_CTX_HOST:
		rc = 0;
		break;
	default:
		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
		       __func__, ctx_cmd);
		break;
	}

	return rc;
}

/**
 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
 * @vport: Pointer to virtual port.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function returns the number of FCP commands pending for the vport.
 * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
 * commands pending on the vport associated with the SCSI device specified
 * by the tgt_id and lun_id parameters.
 * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
 * commands pending on the vport associated with the SCSI target specified
 * by the tgt_id parameter.
 * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
 * commands pending on the vport.
 * This function returns the number of iocbs which satisfy the filter.
 * This function is called without any lock held.
 **/
int
lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
		  lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	int sum, i;
	unsigned long iflags;
	u8 ulp_command;

	spin_lock_irqsave(&phba->hbalock, iflags);
	for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!iocbq || iocbq->vport != vport)
			continue;
		if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
		    !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ))
			continue;

		/* Include counting outstanding aborts */
		ulp_command = get_job_cmnd(phba, iocbq);
		if (ulp_command == CMD_ABORT_XRI_CN ||
		    ulp_command == CMD_CLOSE_XRI_CN ||
		    ulp_command == CMD_ABORT_XRI_WQE) {
			sum++;
			continue;
		}

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       ctx_cmd) == 0)
			sum++;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return sum;
}
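
/*
 * Editor's note (illustrative sketch, not part of the driver): reset and
 * task-management paths typically poll this counter until the outstanding
 * I/O for the scope in question drains. A hypothetical wait for a target's
 * commands to complete, bounded by a jiffies deadline the caller computed:
 *
 *	while (lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_TGT) &&
 *	       time_before(jiffies, deadline))
 *		msleep(20);
 */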

/**
 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @rspiocb: Pointer to response iocb object.
 *
 * This function is called when an aborted FCP iocb completes. This
 * function is called by the ring event handler with no lock held.
 * This function frees the iocb.
 **/
void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3096 ABORT_XRI_CX completing on rpi x%x "
			"original iotag x%x, abort cmd iotag x%x "
			"status 0x%x, reason 0x%x\n",
			(phba->sli_rev == LPFC_SLI_REV4) ?
			cmdiocb->sli4_xritag :
			cmdiocb->iocb.un.acxri.abortContextTag,
			get_job_abtsiotag(phba, cmdiocb),
			cmdiocb->iotag, get_job_ulpstatus(phba, rspiocb),
			get_job_word4(phba, rspiocb));
	lpfc_sli_release_iocbq(phba, cmdiocb);
	return;
}

/**
 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring
 * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then
 * lpfc_sli_validate_fcp_iocb. The ordering for validation before
 * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort
 * followed by lpfc_sli_validate_fcp_iocb.
 *
 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
 * FCP iocbs associated with the LUN specified by the tgt_id and lun_id
 * parameters.
 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
 * FCP iocbs associated with the SCSI target specified by the tgt_id
 * parameter.
 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
 * FCP iocbs associated with the virtual port.
 * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING]; for SLI4,
 * lpfc_sli4_calc_ring is used.
 * This function returns the number of iocbs it failed to abort.
 * This function is called with no locks held.
 **/
int
lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
		    lpfc_ctx_cmd abort_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli_ring *pring = NULL;
	struct lpfc_iocbq *iocbq;
	int errcnt = 0, ret_val = 0;
	unsigned long iflags;
	int i;

	/* all I/Os are in process of being flushed */
	if (phba->hba_flag & HBA_IOQ_FLUSH)
		return errcnt;

	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
			continue;

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       abort_cmd) != 0)
			continue;

		spin_lock_irqsave(&phba->hbalock, iflags);
		if (phba->sli_rev == LPFC_SLI_REV3) {
			pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
		} else if (phba->sli_rev == LPFC_SLI_REV4) {
			pring = lpfc_sli4_calc_ring(phba, iocbq);
		}
		ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
						     lpfc_sli_abort_fcp_cmpl);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		if (ret_val != IOCB_SUCCESS)
			errcnt++;
	}

	return errcnt;
}

/**
 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring
 * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then
 * lpfc_sli_validate_fcp_iocb. The ordering for validation before
 * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort
 * followed by lpfc_sli_validate_fcp_iocb.
 *
 * When cmd == LPFC_CTX_LUN, the function sends abort only to the
 * FCP iocbs associated with the LUN specified by the tgt_id and lun_id
 * parameters.
 * When cmd == LPFC_CTX_TGT, the function sends abort only to the
 * FCP iocbs associated with the SCSI target specified by the tgt_id
 * parameter.
 * When cmd == LPFC_CTX_HOST, the function sends abort to all
 * FCP iocbs associated with the virtual port.
 * This function returns the number of iocbs it aborted.
 * This function is called with no locks held right after a taskmgmt
 * command is sent.
 **/
int
lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
			uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_iocbq *abtsiocbq;
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_iocbq *iocbq;
	int sum, i, ret_val;
	unsigned long iflags;
	struct lpfc_sli_ring *pring_s4 = NULL;
	u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT;
	bool ia;

	spin_lock_irqsave(&phba->hbalock, iflags);

	/* all I/Os are in process of being flushed */
	if (phba->hba_flag & HBA_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return 0;
	}
	sum = 0;

	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
			continue;

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       cmd) != 0)
			continue;

		/* Guard against IO completion being called at same time */
		lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
		spin_lock(&lpfc_cmd->buf_lock);

		if (!lpfc_cmd->pCmd) {
			spin_unlock(&lpfc_cmd->buf_lock);
			continue;
		}

		if (phba->sli_rev == LPFC_SLI_REV4) {
			pring_s4 =
			    phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
			if (!pring_s4) {
				spin_unlock(&lpfc_cmd->buf_lock);
				continue;
			}
			/* Note: both hbalock and ring_lock must be set here */
			spin_lock(&pring_s4->ring_lock);
		}

		/*
		 * If the iocbq is already being aborted, don't take a second
		 * action, but do count it.
		 */
		if ((iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
		    !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
			if (phba->sli_rev == LPFC_SLI_REV4)
				spin_unlock(&pring_s4->ring_lock);
			spin_unlock(&lpfc_cmd->buf_lock);
			continue;
		}

		/* issue ABTS for this IOCB based on iotag */
		abtsiocbq = __lpfc_sli_get_iocbq(phba);
		if (!abtsiocbq) {
			if (phba->sli_rev == LPFC_SLI_REV4)
				spin_unlock(&pring_s4->ring_lock);
			spin_unlock(&lpfc_cmd->buf_lock);
			continue;
		}

		if (phba->sli_rev == LPFC_SLI_REV4) {
			iotag = abtsiocbq->iotag;
			ulp_context = iocbq->sli4_xritag;
			cqid = lpfc_cmd->hdwq->io_cq_map;
		} else {
			iotag = iocbq->iocb.ulpIoTag;
			if (pring->ringno == LPFC_ELS_RING) {
				ndlp = iocbq->ndlp;
				ulp_context = ndlp->nlp_rpi;
			} else {
				ulp_context = iocbq->iocb.ulpContext;
			}
		}

		ndlp = lpfc_cmd->rdata->pnode;

		if (lpfc_is_link_up(phba) &&
		    (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE) &&
		    !(phba->link_flag & LS_EXTERNAL_LOOPBACK))
			ia = false;
		else
			ia = true;

		lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag,
					iocbq->iocb.ulpClass, cqid,
					ia, false);

		abtsiocbq->vport = vport;

		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
		abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
		if (iocbq->cmd_flag & LPFC_IO_FCP)
			abtsiocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
		if (iocbq->cmd_flag & LPFC_IO_FOF)
			abtsiocbq->cmd_flag |= LPFC_IO_FOF;

		/* Setup callback routine and issue the command. */
		abtsiocbq->cmd_cmpl = lpfc_sli_abort_fcp_cmpl;

		/*
		 * Indicate the IO is being aborted by the driver and set
		 * the caller's flag into the aborted IO.
		 */
		iocbq->cmd_flag |= LPFC_DRIVER_ABORTED;

		if (phba->sli_rev == LPFC_SLI_REV4) {
			ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
							abtsiocbq, 0);
			spin_unlock(&pring_s4->ring_lock);
		} else {
			ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
							abtsiocbq, 0);
		}

		spin_unlock(&lpfc_cmd->buf_lock);

		if (ret_val == IOCB_ERROR)
			__lpfc_sli_release_iocbq(phba, abtsiocbq);
		else
			sum++;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return sum;
}

/**
 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_sli_issue_iocb_wait function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	wait_queue_head_t *pdone_q;
	unsigned long iflags;
	struct lpfc_io_buf *lpfc_cmd;
	size_t offset = offsetof(struct lpfc_iocbq, wqe);

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (cmdiocbq->cmd_flag & LPFC_IO_WAKE_TMO) {

		/*
		 * A time out has occurred for the iocb. If a time out
		 * completion handler has been supplied, call it. Otherwise,
		 * just free the iocbq.
		 */

		spin_unlock_irqrestore(&phba->hbalock, iflags);
		cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl;
		cmdiocbq->wait_cmd_cmpl = NULL;
		if (cmdiocbq->cmd_cmpl)
			cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL);
		else
			lpfc_sli_release_iocbq(phba, cmdiocbq);
		return;
	}

	/* Copy the contents of the local rspiocb into the caller's buffer. */
	cmdiocbq->cmd_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->rsp_iocb && rspiocbq)
		memcpy((char *)cmdiocbq->rsp_iocb + offset,
		       (char *)rspiocbq + offset, sizeof(*rspiocbq) - offset);

	/* Set the exchange busy flag for task management commands */
	if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
	    !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
		lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
					cur_iocbq);
		if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY))
			lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
		else
			lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
	}

	pdone_q = cmdiocbq->context_un.wait_queue;
	if (pdone_q)
		wake_up(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return;
}

/**
 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @flag: Flag to test.
 *
 * This routine grabs the hbalock and then tests the cmd_flag to
 * see if the passed in flag is set.
 * Returns:
 * 1 if flag is set.
 * 0 if flag is not set.
 **/
static int
lpfc_chk_iocb_flg(struct lpfc_hba *phba,
		  struct lpfc_iocbq *piocbq, uint32_t flag)
{
	unsigned long iflags;
	int ret;

	spin_lock_irqsave(&phba->hbalock, iflags);
	ret = piocbq->cmd_flag & flag;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;
}

/**
 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
 * @phba: Pointer to HBA context object.
 * @ring_number: Ring number
 * @piocb: Pointer to command iocb.
 * @prspiocbq: Pointer to response iocb.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the iocb to firmware and waits for the
 * iocb to complete. The cmd_cmpl field of the iocb shall be used
 * to handle iocbs which time out. If the field is NULL, the
 * function shall free the iocbq structure. If more clean up is
 * needed, the caller is expected to provide a completion function
 * that will provide the needed clean up. If the iocb command is
 * not completed within timeout seconds, the function will either
 * free the iocbq structure (if cmd_cmpl == NULL) or execute the
 * completion function set in the cmd_cmpl field and then return
 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
 * resources if this function returns IOCB_TIMEDOUT.
 * The function waits for the iocb completion using a
 * non-interruptible wait.
 * This function will sleep while waiting for iocb completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. For the same reason, this function
 * cannot be called with interrupts disabled.
 * This function assumes that the iocb completions occur while
 * this function sleeps. So, this function cannot be called from
 * the thread which processes iocb completion for this ring.
 * This function clears the cmd_flag of the iocb object before
 * issuing the iocb and the iocb completion handler sets this
 * flag and wakes this thread when the iocb completes.
 * The contents of the response iocb will be copied to prspiocbq
 * by the completion handler when the command completes.
 * This function returns IOCB_SUCCESS on success.
 * This function is called with no lock held.
 **/
13101 int
lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,uint32_t ring_number,struct lpfc_iocbq * piocb,struct lpfc_iocbq * prspiocbq,uint32_t timeout)13102 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
13103 uint32_t ring_number,
13104 struct lpfc_iocbq *piocb,
13105 struct lpfc_iocbq *prspiocbq,
13106 uint32_t timeout)
13107 {
13108 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
13109 long timeleft, timeout_req = 0;
13110 int retval = IOCB_SUCCESS;
13111 uint32_t creg_val;
13112 struct lpfc_iocbq *iocb;
13113 int txq_cnt = 0;
13114 int txcmplq_cnt = 0;
13115 struct lpfc_sli_ring *pring;
13116 unsigned long iflags;
13117 bool iocb_completed = true;
13118
13119 if (phba->sli_rev >= LPFC_SLI_REV4) {
13120 lpfc_sli_prep_wqe(phba, piocb);
13121
13122 pring = lpfc_sli4_calc_ring(phba, piocb);
13123 } else
13124 pring = &phba->sli.sli3_ring[ring_number];
13125 /*
13126 * If the caller has provided a response iocbq buffer, then rsp_iocb
13127 * is NULL or its an error.
13128 */
13129 if (prspiocbq) {
13130 if (piocb->rsp_iocb)
13131 return IOCB_ERROR;
13132 piocb->rsp_iocb = prspiocbq;
13133 }
13134
13135 piocb->wait_cmd_cmpl = piocb->cmd_cmpl;
13136 piocb->cmd_cmpl = lpfc_sli_wake_iocb_wait;
13137 piocb->context_un.wait_queue = &done_q;
13138 piocb->cmd_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
13139
13140 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
13141 if (lpfc_readl(phba->HCregaddr, &creg_val))
13142 return IOCB_ERROR;
13143 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
13144 writel(creg_val, phba->HCregaddr);
13145 readl(phba->HCregaddr); /* flush */
13146 }
13147
13148 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
13149 SLI_IOCB_RET_IOCB);
13150 if (retval == IOCB_SUCCESS) {
13151 timeout_req = msecs_to_jiffies(timeout * 1000);
13152 timeleft = wait_event_timeout(done_q,
13153 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
13154 timeout_req);
13155 spin_lock_irqsave(&phba->hbalock, iflags);
13156 if (!(piocb->cmd_flag & LPFC_IO_WAKE)) {
13157
13158 /*
13159 * IOCB timed out. Inform the wake iocb wait
13160 * completion function and set local status
13161 */
13162
13163 iocb_completed = false;
13164 piocb->cmd_flag |= LPFC_IO_WAKE_TMO;
13165 }
13166 spin_unlock_irqrestore(&phba->hbalock, iflags);
13167 if (iocb_completed) {
13168 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13169 "0331 IOCB wake signaled\n");
13170 /* Note: we are not indicating if the IOCB has a success
13171 * status or not - that's for the caller to check.
13172 * IOCB_SUCCESS means just that the command was sent and
13173 * completed. Not that it completed successfully.
13174 			 */
13175 } else if (timeleft == 0) {
13176 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13177 "0338 IOCB wait timeout error - no "
13178 "wake response Data x%x\n", timeout);
13179 retval = IOCB_TIMEDOUT;
13180 } else {
13181 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13182 "0330 IOCB wake NOT set, "
13183 "Data x%x x%lx\n",
13184 timeout, (timeleft / jiffies));
13185 retval = IOCB_TIMEDOUT;
13186 }
13187 } else if (retval == IOCB_BUSY) {
13188 if (phba->cfg_log_verbose & LOG_SLI) {
13189 list_for_each_entry(iocb, &pring->txq, list) {
13190 txq_cnt++;
13191 }
13192 list_for_each_entry(iocb, &pring->txcmplq, list) {
13193 txcmplq_cnt++;
13194 }
13195 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13196 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
13197 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
13198 }
13199 return retval;
13200 } else {
13201 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13202 "0332 IOCB wait issue failed, Data x%x\n",
13203 retval);
13204 retval = IOCB_ERROR;
13205 }
13206
13207 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
13208 if (lpfc_readl(phba->HCregaddr, &creg_val))
13209 return IOCB_ERROR;
13210 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
13211 writel(creg_val, phba->HCregaddr);
13212 readl(phba->HCregaddr); /* flush */
13213 }
13214
13215 if (prspiocbq)
13216 piocb->rsp_iocb = NULL;
13217
13218 piocb->context_un.wait_queue = NULL;
13219 piocb->cmd_cmpl = NULL;
13220 return retval;
13221 }
13222
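/*
 * Illustrative sketch only, not part of the driver: the calling
 * convention for lpfc_sli_issue_iocb_wait() documented above. The
 * helper name and the 30-second timeout are hypothetical; the command
 * iocb is assumed to be fully initialized by the caller. Note the
 * IOCB_TIMEDOUT rule: the caller must not free the iocb resources in
 * that case, because the deferred completion path owns them.
 */
#if 0	/* example only, not compiled */
static int
lpfc_example_issue_sync(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb)
{
	int rc;

	/* No separate response iocb; wait up to 30 seconds */
	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocb, NULL, 30);

	switch (rc) {
	case IOCB_SUCCESS:
		/* The command was sent and completed; the iocb status
		 * words still need to be checked for the actual result.
		 */
		break;
	case IOCB_TIMEDOUT:
		/* Do NOT free cmdiocb here: with cmd_cmpl == NULL the
		 * wake handler frees it, otherwise the caller-supplied
		 * completion routine cleans up later.
		 */
		break;
	default:
		/* IOCB_ERROR or IOCB_BUSY: the command was never issued */
		break;
	}
	return rc;
}
#endif
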
13223 /**
13224 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
13225 * @phba: Pointer to HBA context object.
13226 * @pmboxq: Pointer to driver mailbox object.
13227 * @timeout: Timeout in number of seconds.
13228 *
13229 * This function issues the mailbox to firmware and waits for the
13230 * mailbox command to complete. If the mailbox command is not
13231 * completed within timeout seconds, it returns MBX_TIMEOUT.
13232 * The function waits for the mailbox completion using an
13233 * interruptible wait. If the thread is woken up due to a
13234 * signal, an MBX_TIMEOUT error is returned to the caller. The caller
13235 * should not free the mailbox resources if this function returns
13236 * MBX_TIMEOUT.
13237 * This function will sleep while waiting for mailbox completion.
13238 * So, this function should not be called from any context which
13239 * does not allow sleeping. For the same reason, this function
13240 * cannot be called with interrupts disabled.
13241 * This function assumes that the mailbox completion occurs while
13242 * this function sleeps. So, this function cannot be called from
13243 * the worker thread which processes mailbox completion.
13244 * This function is called in the context of HBA management
13245 * applications.
13246 * This function returns MBX_SUCCESS when successful.
13247 * This function is called with no lock held.
13248 **/
13249 int
13250 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
13251 uint32_t timeout)
13252 {
13253 struct completion mbox_done;
13254 int retval;
13255 unsigned long flag;
13256
13257 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
13258 	/* Set up the wake routine as the mailbox completion callback */
13259 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
13260
13261 	/* Set up the context3 field to pass the completion to the wake function */
13262 init_completion(&mbox_done);
13263 pmboxq->context3 = &mbox_done;
13264 /* now issue the command */
13265 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
13266 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
13267 wait_for_completion_timeout(&mbox_done,
13268 msecs_to_jiffies(timeout * 1000));
13269
13270 spin_lock_irqsave(&phba->hbalock, flag);
13271 pmboxq->context3 = NULL;
13272 		/*
13273 		 * If the LPFC_MBX_WAKE flag is set, the mailbox completed;
13274 		 * otherwise, do not free the resources.
13275 		 */
13276 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
13277 retval = MBX_SUCCESS;
13278 } else {
13279 retval = MBX_TIMEOUT;
13280 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13281 }
13282 spin_unlock_irqrestore(&phba->hbalock, flag);
13283 }
13284 return retval;
13285 }
13286
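/*
 * Illustrative sketch only, not part of the driver: the canonical
 * synchronous mailbox pattern built on lpfc_sli_issue_mbox_wait().
 * The helper name is hypothetical and the mailbox is assumed to have
 * been fully built by the caller. Per the rules above, the mailbox
 * memory must NOT be freed when MBX_TIMEOUT is returned, since the
 * firmware may still complete it later.
 */
#if 0	/* example only, not compiled */
static int
lpfc_example_mbox_sync(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	int rc;

	rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
	if (rc == MBX_TIMEOUT)
		/* Firmware still owns the mailbox; leave pmb alone */
		return rc;

	/* Completed (MBX_SUCCESS) or never issued: reclaim the mailbox */
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
#endif
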
13287 /**
13288 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
13289 * @phba: Pointer to HBA context.
13290 * @mbx_action: Mailbox shutdown options.
13291 *
13292 * This function is called to shutdown the driver's mailbox sub-system.
13293 * It first marks the mailbox sub-system as being in a blocked state to
13294 * prevent asynchronous mailbox commands from being issued off the pending
13295 * mailbox command queue. If the mailbox command sub-system shutdown is due to
13296 * HBA error conditions such as EEH or ERATT, this routine shall invoke
13297 * the mailbox sub-system flush routine to forcefully bring down the
13298 * mailbox sub-system. Otherwise, if it is due to a normal condition (such
13299 * as offline or HBA function reset), this routine will wait for the
13300 * outstanding mailbox command to complete before invoking the mailbox
13301 * sub-system flush routine to gracefully bring down mailbox sub-system.
13302 **/
13303 void
13304 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
13305 {
13306 struct lpfc_sli *psli = &phba->sli;
13307 unsigned long timeout;
13308
13309 if (mbx_action == LPFC_MBX_NO_WAIT) {
13310 /* delay 100ms for port state */
13311 msleep(100);
13312 lpfc_sli_mbox_sys_flush(phba);
13313 return;
13314 }
13315 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
13316
13317 /* Disable softirqs, including timers from obtaining phba->hbalock */
13318 local_bh_disable();
13319
13320 spin_lock_irq(&phba->hbalock);
13321 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13322
13323 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
13324 /* Determine how long we might wait for the active mailbox
13325 * command to be gracefully completed by firmware.
13326 */
13327 if (phba->sli.mbox_active)
13328 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
13329 phba->sli.mbox_active) *
13330 1000) + jiffies;
13331 spin_unlock_irq(&phba->hbalock);
13332
13333 /* Enable softirqs again, done with phba->hbalock */
13334 local_bh_enable();
13335
13336 while (phba->sli.mbox_active) {
13337 /* Check active mailbox complete status every 2ms */
13338 msleep(2);
13339 if (time_after(jiffies, timeout))
13340 			/* Timeout; let the mailbox flush routine
13341 			 * forcefully release the active mailbox command
13342 			 */
13343 break;
13344 }
13345 } else {
13346 spin_unlock_irq(&phba->hbalock);
13347
13348 /* Enable softirqs again, done with phba->hbalock */
13349 local_bh_enable();
13350 }
13351
13352 lpfc_sli_mbox_sys_flush(phba);
13353 }
13354
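/*
 * Illustrative sketch only: the overflow-safe jiffies deadline pattern
 * used by lpfc_sli_mbox_sys_shutdown() above. A deadline is computed
 * once up front and then polled with time_after(). The helper name is
 * hypothetical; the 2ms poll interval mirrors the routine above.
 */
#if 0	/* example only, not compiled */
static void
lpfc_example_wait_mbox_idle(struct lpfc_hba *phba, unsigned long tmo_msecs)
{
	unsigned long deadline = msecs_to_jiffies(tmo_msecs) + jiffies;

	while (phba->sli.mbox_active) {
		msleep(2);			/* re-check every 2ms */
		if (time_after(jiffies, deadline))
			break;			/* give up; caller flushes */
	}
}
#endif
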
13355 /**
13356 * lpfc_sli_eratt_read - read sli-3 error attention events
13357 * @phba: Pointer to HBA context.
13358 *
13359 * This function is called to read the SLI3 device error attention registers
13360 * for possible error attention events. The caller must hold the hostlock
13361 * with spin_lock_irq().
13362 *
13363 * This function returns 1 when there is Error Attention in the Host Attention
13364 * Register and returns 0 otherwise.
13365 **/
13366 static int
13367 lpfc_sli_eratt_read(struct lpfc_hba *phba)
13368 {
13369 uint32_t ha_copy;
13370
13371 /* Read chip Host Attention (HA) register */
13372 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13373 goto unplug_err;
13374
13375 if (ha_copy & HA_ERATT) {
13376 /* Read host status register to retrieve error event */
13377 if (lpfc_sli_read_hs(phba))
13378 goto unplug_err;
13379
13380 		/* Check if a deferred error condition is active */
13381 if ((HS_FFER1 & phba->work_hs) &&
13382 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13383 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
13384 phba->hba_flag |= DEFER_ERATT;
13385 /* Clear all interrupt enable conditions */
13386 writel(0, phba->HCregaddr);
13387 readl(phba->HCregaddr);
13388 }
13389
13390 /* Set the driver HA work bitmap */
13391 phba->work_ha |= HA_ERATT;
13392 /* Indicate polling handles this ERATT */
13393 phba->hba_flag |= HBA_ERATT_HANDLED;
13394 return 1;
13395 }
13396 return 0;
13397
13398 unplug_err:
13399 /* Set the driver HS work bitmap */
13400 phba->work_hs |= UNPLUG_ERR;
13401 /* Set the driver HA work bitmap */
13402 phba->work_ha |= HA_ERATT;
13403 /* Indicate polling handles this ERATT */
13404 phba->hba_flag |= HBA_ERATT_HANDLED;
13405 return 1;
13406 }
13407
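/*
 * Illustrative sketch only: the deferred error attention test applied
 * above (and again in lpfc_sli_sp_intr_handler) factored into a
 * hypothetical predicate. A deferred error is signaled when HS_FFER1
 * is set together with at least one of HS_FFER2..HS_FFER8 in the host
 * status word.
 */
#if 0	/* example only, not compiled */
static inline bool
lpfc_example_deferred_eratt(uint32_t work_hs)
{
	return (work_hs & HS_FFER1) &&
	       (work_hs & (HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
			   HS_FFER6 | HS_FFER7 | HS_FFER8));
}
#endif
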
13408 /**
13409 * lpfc_sli4_eratt_read - read sli-4 error attention events
13410 * @phba: Pointer to HBA context.
13411 *
13412 * This function is called to read the SLI4 device error attention registers
13413 * for possible error attention events. The caller must hold the hostlock
13414 * with spin_lock_irq().
13415 *
13416 * This function returns 1 when there is Error Attention in the Host Attention
13417 * Register and returns 0 otherwise.
13418 **/
13419 static int
13420 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
13421 {
13422 uint32_t uerr_sta_hi, uerr_sta_lo;
13423 uint32_t if_type, portsmphr;
13424 struct lpfc_register portstat_reg;
13425 u32 logmask;
13426
13427 /*
13428 * For now, use the SLI4 device internal unrecoverable error
13429 * registers for error attention. This can be changed later.
13430 */
13431 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
13432 switch (if_type) {
13433 case LPFC_SLI_INTF_IF_TYPE_0:
13434 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
13435 &uerr_sta_lo) ||
13436 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
13437 &uerr_sta_hi)) {
13438 phba->work_hs |= UNPLUG_ERR;
13439 phba->work_ha |= HA_ERATT;
13440 phba->hba_flag |= HBA_ERATT_HANDLED;
13441 return 1;
13442 }
13443 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
13444 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
13445 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13446 "1423 HBA Unrecoverable error: "
13447 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
13448 "ue_mask_lo_reg=0x%x, "
13449 "ue_mask_hi_reg=0x%x\n",
13450 uerr_sta_lo, uerr_sta_hi,
13451 phba->sli4_hba.ue_mask_lo,
13452 phba->sli4_hba.ue_mask_hi);
13453 phba->work_status[0] = uerr_sta_lo;
13454 phba->work_status[1] = uerr_sta_hi;
13455 phba->work_ha |= HA_ERATT;
13456 phba->hba_flag |= HBA_ERATT_HANDLED;
13457 return 1;
13458 }
13459 break;
13460 case LPFC_SLI_INTF_IF_TYPE_2:
13461 case LPFC_SLI_INTF_IF_TYPE_6:
13462 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
13463 &portstat_reg.word0) ||
13464 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
13465 &portsmphr)){
13466 phba->work_hs |= UNPLUG_ERR;
13467 phba->work_ha |= HA_ERATT;
13468 phba->hba_flag |= HBA_ERATT_HANDLED;
13469 return 1;
13470 }
13471 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
13472 phba->work_status[0] =
13473 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
13474 phba->work_status[1] =
13475 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
13476 logmask = LOG_TRACE_EVENT;
13477 if (phba->work_status[0] ==
13478 SLIPORT_ERR1_REG_ERR_CODE_2 &&
13479 phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
13480 logmask = LOG_SLI;
13481 lpfc_printf_log(phba, KERN_ERR, logmask,
13482 "2885 Port Status Event: "
13483 "port status reg 0x%x, "
13484 "port smphr reg 0x%x, "
13485 "error 1=0x%x, error 2=0x%x\n",
13486 portstat_reg.word0,
13487 portsmphr,
13488 phba->work_status[0],
13489 phba->work_status[1]);
13490 phba->work_ha |= HA_ERATT;
13491 phba->hba_flag |= HBA_ERATT_HANDLED;
13492 return 1;
13493 }
13494 break;
13495 case LPFC_SLI_INTF_IF_TYPE_1:
13496 default:
13497 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13498 "2886 HBA Error Attention on unsupported "
13499 "if type %d.", if_type);
13500 return 1;
13501 }
13502
13503 return 0;
13504 }
13505
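/*
 * Illustrative sketch only: how the SLI-4 interface type switched on
 * above is decoded from the cached sli_intf register with the bf_get()
 * accessor used throughout this file. The helper name is hypothetical.
 */
#if 0	/* example only, not compiled */
static inline uint32_t
lpfc_example_if_type(struct lpfc_hba *phba)
{
	/* sli_intf is captured at attach time; extract the IF_TYPE field */
	return bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
}
#endif
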
13506 /**
13507 * lpfc_sli_check_eratt - check error attention events
13508 * @phba: Pointer to HBA context.
13509 *
13510 * This function is called from timer soft interrupt context to check HBA's
13511 * error attention register bit for error attention events.
13512 *
13513 * This function returns 1 when there is Error Attention in the Host Attention
13514 * Register and returns 0 otherwise.
13515 **/
13516 int
13517 lpfc_sli_check_eratt(struct lpfc_hba *phba)
13518 {
13519 uint32_t ha_copy;
13520
13521 /* If somebody is waiting to handle an eratt, don't process it
13522 * here. The brdkill function will do this.
13523 */
13524 if (phba->link_flag & LS_IGNORE_ERATT)
13525 return 0;
13526
13527 /* Check if interrupt handler handles this ERATT */
13528 spin_lock_irq(&phba->hbalock);
13529 if (phba->hba_flag & HBA_ERATT_HANDLED) {
13530 /* Interrupt handler has handled ERATT */
13531 spin_unlock_irq(&phba->hbalock);
13532 return 0;
13533 }
13534
13535 /*
13536 	 * If a deferred error attention is pending, do not check for
13537 	 * error attention
13538 */
13539 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13540 spin_unlock_irq(&phba->hbalock);
13541 return 0;
13542 }
13543
13544 /* If PCI channel is offline, don't process it */
13545 if (unlikely(pci_channel_offline(phba->pcidev))) {
13546 spin_unlock_irq(&phba->hbalock);
13547 return 0;
13548 }
13549
13550 switch (phba->sli_rev) {
13551 case LPFC_SLI_REV2:
13552 case LPFC_SLI_REV3:
13553 /* Read chip Host Attention (HA) register */
13554 ha_copy = lpfc_sli_eratt_read(phba);
13555 break;
13556 case LPFC_SLI_REV4:
13557 		/* Read device Unrecoverable Error (UERR) registers */
13558 ha_copy = lpfc_sli4_eratt_read(phba);
13559 break;
13560 default:
13561 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13562 "0299 Invalid SLI revision (%d)\n",
13563 phba->sli_rev);
13564 ha_copy = 0;
13565 break;
13566 }
13567 spin_unlock_irq(&phba->hbalock);
13568
13569 return ha_copy;
13570 }
13571
13572 /**
13573 * lpfc_intr_state_check - Check device state for interrupt handling
13574 * @phba: Pointer to HBA context.
13575 *
13576 * This inline routine checks whether a device or its PCI slot is in a state
13577 * in which the interrupt should be handled.
13578 *
13579 * This function returns 0 if the device or the PCI slot is in a state in
13580 * which the interrupt should be handled, otherwise -EIO.
13581 */
13582 static inline int
13583 lpfc_intr_state_check(struct lpfc_hba *phba)
13584 {
13585 /* If the pci channel is offline, ignore all the interrupts */
13586 if (unlikely(pci_channel_offline(phba->pcidev)))
13587 return -EIO;
13588
13589 /* Update device level interrupt statistics */
13590 phba->sli.slistat.sli_intr++;
13591
13592 /* Ignore all interrupts during initialization. */
13593 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
13594 return -EIO;
13595
13596 return 0;
13597 }
13598
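/*
 * Illustrative sketch only: the standard prologue each lpfc interrupt
 * handler below runs before touching hardware. The handler name is
 * hypothetical; the NULL check followed by lpfc_intr_state_check()
 * matches the real handlers in this file.
 */
#if 0	/* example only, not compiled */
static irqreturn_t
lpfc_example_isr(int irq, void *dev_id)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;	/* spurious: no device context */

	if (lpfc_intr_state_check(phba))
		return IRQ_NONE;	/* offline or still initializing */

	/* ... service the interrupt source here ... */
	return IRQ_HANDLED;
}
#endif
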
13599 /**
13600 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
13601 * @irq: Interrupt number.
13602 * @dev_id: The device context pointer.
13603 *
13604 * This function is directly called from the PCI layer as an interrupt
13605 * service routine when device with SLI-3 interface spec is enabled with
13606 * MSI-X multi-message interrupt mode and there are slow-path events in
13607 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
13608 * interrupt mode, this function is called as part of the device-level
13609 * interrupt handler. When the PCI slot is in error recovery or the HBA
13610 * is undergoing initialization, the interrupt handler will not process
13611 * the interrupt. The link attention and ELS ring attention events are
13612 * handled by the worker thread. The interrupt handler signals the worker
13613 * thread and returns for these events. This function is called without
13614 * any lock held. It gets the hbalock to access and update SLI data
13615 * structures.
13616 *
13617 * This function returns IRQ_HANDLED when interrupt is handled else it
13618 * returns IRQ_NONE.
13619 **/
13620 irqreturn_t
13621 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
13622 {
13623 struct lpfc_hba *phba;
13624 uint32_t ha_copy, hc_copy;
13625 uint32_t work_ha_copy;
13626 unsigned long status;
13627 unsigned long iflag;
13628 uint32_t control;
13629
13630 MAILBOX_t *mbox, *pmbox;
13631 struct lpfc_vport *vport;
13632 struct lpfc_nodelist *ndlp;
13633 struct lpfc_dmabuf *mp;
13634 LPFC_MBOXQ_t *pmb;
13635 int rc;
13636
13637 /*
13638 * Get the driver's phba structure from the dev_id and
13639 * assume the HBA is not interrupting.
13640 */
13641 phba = (struct lpfc_hba *)dev_id;
13642
13643 if (unlikely(!phba))
13644 return IRQ_NONE;
13645
13646 /*
13647 	 * Extra attention is needed when this function is invoked as an
13648 * individual interrupt handler in MSI-X multi-message interrupt mode
13649 */
13650 if (phba->intr_type == MSIX) {
13651 /* Check device state for handling interrupt */
13652 if (lpfc_intr_state_check(phba))
13653 return IRQ_NONE;
13654 /* Need to read HA REG for slow-path events */
13655 spin_lock_irqsave(&phba->hbalock, iflag);
13656 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13657 goto unplug_error;
13658 /* If somebody is waiting to handle an eratt don't process it
13659 * here. The brdkill function will do this.
13660 */
13661 if (phba->link_flag & LS_IGNORE_ERATT)
13662 ha_copy &= ~HA_ERATT;
13663 /* Check the need for handling ERATT in interrupt handler */
13664 if (ha_copy & HA_ERATT) {
13665 if (phba->hba_flag & HBA_ERATT_HANDLED)
13666 /* ERATT polling has handled ERATT */
13667 ha_copy &= ~HA_ERATT;
13668 else
13669 /* Indicate interrupt handler handles ERATT */
13670 phba->hba_flag |= HBA_ERATT_HANDLED;
13671 }
13672
13673 /*
13674 		 * If a deferred error attention is pending, do not check for any
13675 * interrupt.
13676 */
13677 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13678 spin_unlock_irqrestore(&phba->hbalock, iflag);
13679 return IRQ_NONE;
13680 }
13681
13682 /* Clear up only attention source related to slow-path */
13683 if (lpfc_readl(phba->HCregaddr, &hc_copy))
13684 goto unplug_error;
13685
13686 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
13687 HC_LAINT_ENA | HC_ERINT_ENA),
13688 phba->HCregaddr);
13689 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
13690 phba->HAregaddr);
13691 writel(hc_copy, phba->HCregaddr);
13692 readl(phba->HAregaddr); /* flush */
13693 spin_unlock_irqrestore(&phba->hbalock, iflag);
13694 } else
13695 ha_copy = phba->ha_copy;
13696
13697 work_ha_copy = ha_copy & phba->work_ha_mask;
13698
13699 if (work_ha_copy) {
13700 if (work_ha_copy & HA_LATT) {
13701 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
13702 /*
13703 * Turn off Link Attention interrupts
13704 * until CLEAR_LA done
13705 */
13706 spin_lock_irqsave(&phba->hbalock, iflag);
13707 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
13708 if (lpfc_readl(phba->HCregaddr, &control))
13709 goto unplug_error;
13710 control &= ~HC_LAINT_ENA;
13711 writel(control, phba->HCregaddr);
13712 readl(phba->HCregaddr); /* flush */
13713 spin_unlock_irqrestore(&phba->hbalock, iflag);
13714 }
13715 else
13716 work_ha_copy &= ~HA_LATT;
13717 }
13718
13719 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
13720 /*
13721 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
13722 * the only slow ring.
13723 */
13724 status = (work_ha_copy &
13725 (HA_RXMASK << (4*LPFC_ELS_RING)));
13726 status >>= (4*LPFC_ELS_RING);
13727 if (status & HA_RXMASK) {
13728 spin_lock_irqsave(&phba->hbalock, iflag);
13729 if (lpfc_readl(phba->HCregaddr, &control))
13730 goto unplug_error;
13731
13732 lpfc_debugfs_slow_ring_trc(phba,
13733 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
13734 control, status,
13735 (uint32_t)phba->sli.slistat.sli_intr);
13736
13737 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
13738 lpfc_debugfs_slow_ring_trc(phba,
13739 "ISR Disable ring:"
13740 "pwork:x%x hawork:x%x wait:x%x",
13741 phba->work_ha, work_ha_copy,
13742 (uint32_t)((unsigned long)
13743 &phba->work_waitq));
13744
13745 control &=
13746 ~(HC_R0INT_ENA << LPFC_ELS_RING);
13747 writel(control, phba->HCregaddr);
13748 readl(phba->HCregaddr); /* flush */
13749 }
13750 else {
13751 lpfc_debugfs_slow_ring_trc(phba,
13752 "ISR slow ring: pwork:"
13753 "x%x hawork:x%x wait:x%x",
13754 phba->work_ha, work_ha_copy,
13755 (uint32_t)((unsigned long)
13756 &phba->work_waitq));
13757 }
13758 spin_unlock_irqrestore(&phba->hbalock, iflag);
13759 }
13760 }
13761 spin_lock_irqsave(&phba->hbalock, iflag);
13762 if (work_ha_copy & HA_ERATT) {
13763 if (lpfc_sli_read_hs(phba))
13764 goto unplug_error;
13765 /*
13766 * Check if there is a deferred error condition
13767 * is active
13768 */
13769 if ((HS_FFER1 & phba->work_hs) &&
13770 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13771 HS_FFER6 | HS_FFER7 | HS_FFER8) &
13772 phba->work_hs)) {
13773 phba->hba_flag |= DEFER_ERATT;
13774 /* Clear all interrupt enable conditions */
13775 writel(0, phba->HCregaddr);
13776 readl(phba->HCregaddr);
13777 }
13778 }
13779
13780 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
13781 pmb = phba->sli.mbox_active;
13782 pmbox = &pmb->u.mb;
13783 mbox = phba->mbox;
13784 vport = pmb->vport;
13785
13786 /* First check out the status word */
13787 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
13788 if (pmbox->mbxOwner != OWN_HOST) {
13789 spin_unlock_irqrestore(&phba->hbalock, iflag);
13790 /*
13791 * Stray Mailbox Interrupt, mbxCommand <cmd>
13792 * mbxStatus <status>
13793 */
13794 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13795 "(%d):0304 Stray Mailbox "
13796 "Interrupt mbxCommand x%x "
13797 "mbxStatus x%x\n",
13798 (vport ? vport->vpi : 0),
13799 pmbox->mbxCommand,
13800 pmbox->mbxStatus);
13801 /* clear mailbox attention bit */
13802 work_ha_copy &= ~HA_MBATT;
13803 } else {
13804 phba->sli.mbox_active = NULL;
13805 spin_unlock_irqrestore(&phba->hbalock, iflag);
13806 phba->last_completion_time = jiffies;
13807 del_timer(&phba->sli.mbox_tmo);
13808 if (pmb->mbox_cmpl) {
13809 lpfc_sli_pcimem_bcopy(mbox, pmbox,
13810 MAILBOX_CMD_SIZE);
13811 if (pmb->out_ext_byte_len &&
13812 pmb->ctx_buf)
13813 lpfc_sli_pcimem_bcopy(
13814 phba->mbox_ext,
13815 pmb->ctx_buf,
13816 pmb->out_ext_byte_len);
13817 }
13818 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13819 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13820
13821 lpfc_debugfs_disc_trc(vport,
13822 LPFC_DISC_TRC_MBOX_VPORT,
13823 					"MBOX dflt rpi: "
13824 "status:x%x rpi:x%x",
13825 (uint32_t)pmbox->mbxStatus,
13826 pmbox->un.varWords[0], 0);
13827
13828 if (!pmbox->mbxStatus) {
13829 mp = (struct lpfc_dmabuf *)
13830 (pmb->ctx_buf);
13831 ndlp = (struct lpfc_nodelist *)
13832 pmb->ctx_ndlp;
13833
13834 /* Reg_LOGIN of dflt RPI was
13835 					 * successful. Now let's get
13836 * rid of the RPI using the
13837 * same mbox buffer.
13838 */
13839 lpfc_unreg_login(phba,
13840 vport->vpi,
13841 pmbox->un.varWords[0],
13842 pmb);
13843 pmb->mbox_cmpl =
13844 lpfc_mbx_cmpl_dflt_rpi;
13845 pmb->ctx_buf = mp;
13846 pmb->ctx_ndlp = ndlp;
13847 pmb->vport = vport;
13848 rc = lpfc_sli_issue_mbox(phba,
13849 pmb,
13850 MBX_NOWAIT);
13851 if (rc != MBX_BUSY)
13852 lpfc_printf_log(phba,
13853 KERN_ERR,
13854 LOG_TRACE_EVENT,
13855 							"0350 rc should have "
13856 "been MBX_BUSY\n");
13857 if (rc != MBX_NOT_FINISHED)
13858 goto send_current_mbox;
13859 }
13860 }
13861 spin_lock_irqsave(
13862 &phba->pport->work_port_lock,
13863 iflag);
13864 phba->pport->work_port_events &=
13865 ~WORKER_MBOX_TMO;
13866 spin_unlock_irqrestore(
13867 &phba->pport->work_port_lock,
13868 iflag);
13869
13870 /* Do NOT queue MBX_HEARTBEAT to the worker
13871 * thread for processing.
13872 */
13873 if (pmbox->mbxCommand == MBX_HEARTBEAT) {
13874 /* Process mbox now */
13875 phba->sli.mbox_active = NULL;
13876 phba->sli.sli_flag &=
13877 ~LPFC_SLI_MBOX_ACTIVE;
13878 if (pmb->mbox_cmpl)
13879 pmb->mbox_cmpl(phba, pmb);
13880 } else {
13881 /* Queue to worker thread to process */
13882 lpfc_mbox_cmpl_put(phba, pmb);
13883 }
13884 }
13885 } else
13886 spin_unlock_irqrestore(&phba->hbalock, iflag);
13887
13888 if ((work_ha_copy & HA_MBATT) &&
13889 (phba->sli.mbox_active == NULL)) {
13890 send_current_mbox:
13891 /* Process next mailbox command if there is one */
13892 do {
13893 rc = lpfc_sli_issue_mbox(phba, NULL,
13894 MBX_NOWAIT);
13895 } while (rc == MBX_NOT_FINISHED);
13896 if (rc != MBX_SUCCESS)
13897 lpfc_printf_log(phba, KERN_ERR,
13898 LOG_TRACE_EVENT,
13899 "0349 rc should be "
13900 "MBX_SUCCESS\n");
13901 }
13902
13903 spin_lock_irqsave(&phba->hbalock, iflag);
13904 phba->work_ha |= work_ha_copy;
13905 spin_unlock_irqrestore(&phba->hbalock, iflag);
13906 lpfc_worker_wake_up(phba);
13907 }
13908 return IRQ_HANDLED;
13909 unplug_error:
13910 spin_unlock_irqrestore(&phba->hbalock, iflag);
13911 return IRQ_HANDLED;
13912
13913 } /* lpfc_sli_sp_intr_handler */
13914
13915 /**
13916 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
13917 * @irq: Interrupt number.
13918 * @dev_id: The device context pointer.
13919 *
13920 * This function is directly called from the PCI layer as an interrupt
13921 * service routine when device with SLI-3 interface spec is enabled with
13922 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
13923 * ring event in the HBA. However, when the device is enabled with either
13924 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13925 * device-level interrupt handler. When the PCI slot is in error recovery
13926 * or the HBA is undergoing initialization, the interrupt handler will not
13927 * process the interrupt. The SCSI FCP fast-path ring events are handled in
13928 * the interrupt context. This function is called without any lock held.
13929 * It gets the hbalock to access and update SLI data structures.
13930 *
13931 * This function returns IRQ_HANDLED when interrupt is handled else it
13932 * returns IRQ_NONE.
13933 **/
13934 irqreturn_t
13935 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
13936 {
13937 struct lpfc_hba *phba;
13938 uint32_t ha_copy;
13939 unsigned long status;
13940 unsigned long iflag;
13941 struct lpfc_sli_ring *pring;
13942
13943 /* Get the driver's phba structure from the dev_id and
13944 * assume the HBA is not interrupting.
13945 */
13946 phba = (struct lpfc_hba *) dev_id;
13947
13948 if (unlikely(!phba))
13949 return IRQ_NONE;
13950
13951 /*
13952 	 * Extra attention is needed when this function is invoked as an
13953 * individual interrupt handler in MSI-X multi-message interrupt mode
13954 */
13955 if (phba->intr_type == MSIX) {
13956 /* Check device state for handling interrupt */
13957 if (lpfc_intr_state_check(phba))
13958 return IRQ_NONE;
13959 /* Need to read HA REG for FCP ring and other ring events */
13960 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13961 return IRQ_HANDLED;
13962 /* Clear up only attention source related to fast-path */
13963 spin_lock_irqsave(&phba->hbalock, iflag);
13964 /*
13965 		 * If a deferred error attention is pending, do not check for
13966 * any interrupt.
13967 */
13968 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13969 spin_unlock_irqrestore(&phba->hbalock, iflag);
13970 return IRQ_NONE;
13971 }
13972 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
13973 phba->HAregaddr);
13974 readl(phba->HAregaddr); /* flush */
13975 spin_unlock_irqrestore(&phba->hbalock, iflag);
13976 } else
13977 ha_copy = phba->ha_copy;
13978
13979 /*
13980 * Process all events on FCP ring. Take the optimized path for FCP IO.
13981 */
13982 ha_copy &= ~(phba->work_ha_mask);
13983
13984 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13985 status >>= (4*LPFC_FCP_RING);
13986 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
13987 if (status & HA_RXMASK)
13988 lpfc_sli_handle_fast_ring_event(phba, pring, status);
13989
13990 if (phba->cfg_multi_ring_support == 2) {
13991 /*
13992 * Process all events on extra ring. Take the optimized path
13993 * for extra ring IO.
13994 */
13995 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13996 status >>= (4*LPFC_EXTRA_RING);
13997 if (status & HA_RXMASK) {
13998 lpfc_sli_handle_fast_ring_event(phba,
13999 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
14000 status);
14001 }
14002 }
14003 return IRQ_HANDLED;
14004 } /* lpfc_sli_fp_intr_handler */
14005
14006 /**
14007 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
14008 * @irq: Interrupt number.
14009 * @dev_id: The device context pointer.
14010 *
14011 * This function is the HBA device-level interrupt handler to device with
14012 * SLI-3 interface spec, called from the PCI layer when either MSI or
14013 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
14014 * requires driver attention. This function invokes the slow-path interrupt
14015 * attention handling function and fast-path interrupt attention handling
14016 * function in turn to process the relevant HBA attention events. This
14017 * function is called without any lock held. It gets the hbalock to access
14018 * and update SLI data structures.
14019 *
14020 * This function returns IRQ_HANDLED when interrupt is handled, else it
14021 * returns IRQ_NONE.
14022 **/
14023 irqreturn_t
14024 lpfc_sli_intr_handler(int irq, void *dev_id)
14025 {
14026 struct lpfc_hba *phba;
14027 irqreturn_t sp_irq_rc, fp_irq_rc;
14028 unsigned long status1, status2;
14029 uint32_t hc_copy;
14030
14031 /*
14032 * Get the driver's phba structure from the dev_id and
14033 * assume the HBA is not interrupting.
14034 */
14035 phba = (struct lpfc_hba *) dev_id;
14036
14037 if (unlikely(!phba))
14038 return IRQ_NONE;
14039
14040 /* Check device state for handling interrupt */
14041 if (lpfc_intr_state_check(phba))
14042 return IRQ_NONE;
14043
14044 spin_lock(&phba->hbalock);
14045 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
14046 spin_unlock(&phba->hbalock);
14047 return IRQ_HANDLED;
14048 }
14049
14050 if (unlikely(!phba->ha_copy)) {
14051 spin_unlock(&phba->hbalock);
14052 return IRQ_NONE;
14053 } else if (phba->ha_copy & HA_ERATT) {
14054 if (phba->hba_flag & HBA_ERATT_HANDLED)
14055 /* ERATT polling has handled ERATT */
14056 phba->ha_copy &= ~HA_ERATT;
14057 else
14058 /* Indicate interrupt handler handles ERATT */
14059 phba->hba_flag |= HBA_ERATT_HANDLED;
14060 }
14061
14062 /*
14063 	 * If a deferred error attention is pending, do not check for any interrupt.
14064 */
14065 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
14066 spin_unlock(&phba->hbalock);
14067 return IRQ_NONE;
14068 }
14069
14070 /* Clear attention sources except link and error attentions */
14071 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
14072 spin_unlock(&phba->hbalock);
14073 return IRQ_HANDLED;
14074 }
14075 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
14076 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
14077 phba->HCregaddr);
14078 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
14079 writel(hc_copy, phba->HCregaddr);
14080 readl(phba->HAregaddr); /* flush */
14081 spin_unlock(&phba->hbalock);
14082
14083 /*
14084 * Invokes slow-path host attention interrupt handling as appropriate.
14085 */
14086
14087 /* status of events with mailbox and link attention */
14088 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
14089
14090 /* status of events with ELS ring */
14091 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
14092 status2 >>= (4*LPFC_ELS_RING);
14093
14094 if (status1 || (status2 & HA_RXMASK))
14095 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
14096 else
14097 sp_irq_rc = IRQ_NONE;
14098
14099 /*
14100 * Invoke fast-path host attention interrupt handling as appropriate.
14101 */
14102
14103 /* status of events with FCP ring */
14104 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
14105 status1 >>= (4*LPFC_FCP_RING);
14106
14107 /* status of events with extra ring */
14108 if (phba->cfg_multi_ring_support == 2) {
14109 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
14110 status2 >>= (4*LPFC_EXTRA_RING);
14111 } else
14112 status2 = 0;
14113
14114 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
14115 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
14116 else
14117 fp_irq_rc = IRQ_NONE;
14118
14119 /* Return device-level interrupt handling status */
14120 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
14121 } /* lpfc_sli_intr_handler */
14122
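/*
 * Illustrative sketch only: how the handlers above carve per-ring
 * attention bits out of the Host Attention (HA) register. Each ring
 * owns a 4-bit field, so the field for a given ring is isolated by
 * shifting the HA_RXMASK nibble into place and right-justifying the
 * result. The helper name is hypothetical.
 */
#if 0	/* example only, not compiled */
static inline unsigned long
lpfc_example_ring_status(uint32_t ha_copy, int ring)
{
	unsigned long status;

	status = ha_copy & (HA_RXMASK << (4 * ring));	/* isolate nibble */
	return status >> (4 * ring);			/* right-justify */
}
#endif
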
14123 /**
14124 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
14125 * @phba: pointer to lpfc hba data structure.
14126 *
14127 * This routine is invoked by the worker thread to process all the pending
14128 * SLI4 ELS XRI abort events.
14129 **/
14130 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
14131 {
14132 struct lpfc_cq_event *cq_event;
14133 unsigned long iflags;
14134
14135 /* First, declare the els xri abort event has been handled */
14136 spin_lock_irqsave(&phba->hbalock, iflags);
14137 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
14138 spin_unlock_irqrestore(&phba->hbalock, iflags);
14139
14140 /* Now, handle all the els xri abort events */
14141 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
14142 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
14143 /* Get the first event from the head of the event queue */
14144 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
14145 cq_event, struct lpfc_cq_event, list);
14146 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
14147 iflags);
14148 /* Notify aborted XRI for ELS work queue */
14149 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
14150
14151 /* Free the event processed back to the free pool */
14152 lpfc_sli4_cq_event_release(phba, cq_event);
14153 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
14154 iflags);
14155 }
14156 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
14157 }
14158
14159 /**
14160 * lpfc_sli4_els_preprocess_rspiocbq - Get response iocbq from els wcqe
14161 * @phba: Pointer to HBA context object.
14162 * @irspiocbq: Pointer to work-queue completion queue entry.
14163 *
14164 * This routine handles an ELS work-queue completion event and constructs
14165 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
14166 * discovery engine to handle.
14167 *
14168 * Return: Pointer to the receive IOCBQ, NULL otherwise.
14169 **/
14170 static struct lpfc_iocbq *
14171 lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
14172 struct lpfc_iocbq *irspiocbq)
14173 {
14174 struct lpfc_sli_ring *pring;
14175 struct lpfc_iocbq *cmdiocbq;
14176 struct lpfc_wcqe_complete *wcqe;
14177 unsigned long iflags;
14178
14179 pring = lpfc_phba_elsring(phba);
14180 if (unlikely(!pring))
14181 return NULL;
14182
14183 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
14184 spin_lock_irqsave(&pring->ring_lock, iflags);
14185 pring->stats.iocb_event++;
14186 /* Look up the ELS command IOCB and create pseudo response IOCB */
14187 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
14188 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14189 if (unlikely(!cmdiocbq)) {
14190 spin_unlock_irqrestore(&pring->ring_lock, iflags);
14191 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14192 "0386 ELS complete with no corresponding "
14193 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
14194 wcqe->word0, wcqe->total_data_placed,
14195 wcqe->parameter, wcqe->word3);
14196 lpfc_sli_release_iocbq(phba, irspiocbq);
14197 return NULL;
14198 }
14199
14200 memcpy(&irspiocbq->wqe, &cmdiocbq->wqe, sizeof(union lpfc_wqe128));
14201 memcpy(&irspiocbq->wcqe_cmpl, wcqe, sizeof(*wcqe));
14202
14203 /* Put the iocb back on the txcmplq */
14204 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
14205 spin_unlock_irqrestore(&pring->ring_lock, iflags);
14206
14207 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
14208 spin_lock_irqsave(&phba->hbalock, iflags);
14209 irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
14210 spin_unlock_irqrestore(&phba->hbalock, iflags);
14211 }
14212
14213 return irspiocbq;
14214 }
14215
14216 inline struct lpfc_cq_event *
14217 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
14218 {
14219 struct lpfc_cq_event *cq_event;
14220
14221 /* Allocate a new internal CQ_EVENT entry */
14222 cq_event = lpfc_sli4_cq_event_alloc(phba);
14223 if (!cq_event) {
14224 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14225 "0602 Failed to alloc CQ_EVENT entry\n");
14226 return NULL;
14227 }
14228
14229 /* Move the CQE into the event */
14230 memcpy(&cq_event->cqe, entry, size);
14231 return cq_event;
14232 }
14233
14234 /**
14235 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
14236 * @phba: Pointer to HBA context object.
14237 * @mcqe: Pointer to mailbox completion queue entry.
14238 *
14239 * This routine processes a mailbox completion queue entry carrying an
14240 * asynchronous event.
14241 *
14242 * Return: true if work posted to worker thread, otherwise false.
14243 **/
14244 static bool
14245 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
14246 {
14247 struct lpfc_cq_event *cq_event;
14248 unsigned long iflags;
14249
14250 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14251 "0392 Async Event: word0:x%x, word1:x%x, "
14252 "word2:x%x, word3:x%x\n", mcqe->word0,
14253 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
14254
14255 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
14256 if (!cq_event)
14257 return false;
14258
14259 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
14260 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
14261 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
14262
14263 /* Set the async event flag */
14264 spin_lock_irqsave(&phba->hbalock, iflags);
14265 phba->hba_flag |= ASYNC_EVENT;
14266 spin_unlock_irqrestore(&phba->hbalock, iflags);
14267
14268 return true;
14269 }
14270
14271 /**
14272 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
14273 * @phba: Pointer to HBA context object.
14274 * @mcqe: Pointer to mailbox completion queue entry.
14275 *
14276 * This routine processes a mailbox completion queue entry carrying a
14277 * mailbox completion event.
14278 *
14279 * Return: true if work posted to worker thread, otherwise false.
14280 **/
14281 static bool
14282 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
14283 {
14284 uint32_t mcqe_status;
14285 MAILBOX_t *mbox, *pmbox;
14286 struct lpfc_mqe *mqe;
14287 struct lpfc_vport *vport;
14288 struct lpfc_nodelist *ndlp;
14289 struct lpfc_dmabuf *mp;
14290 unsigned long iflags;
14291 LPFC_MBOXQ_t *pmb;
14292 bool workposted = false;
14293 int rc;
14294
14295 	/* If not a mailbox completion MCQE, bail out after checking the consumed flag */
14296 if (!bf_get(lpfc_trailer_completed, mcqe))
14297 goto out_no_mqe_complete;
14298
14299 /* Get the reference to the active mbox command */
14300 spin_lock_irqsave(&phba->hbalock, iflags);
14301 pmb = phba->sli.mbox_active;
14302 if (unlikely(!pmb)) {
14303 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14304 "1832 No pending MBOX command to handle\n");
14305 spin_unlock_irqrestore(&phba->hbalock, iflags);
14306 goto out_no_mqe_complete;
14307 }
14308 spin_unlock_irqrestore(&phba->hbalock, iflags);
14309 mqe = &pmb->u.mqe;
14310 pmbox = (MAILBOX_t *)&pmb->u.mqe;
14311 mbox = phba->mbox;
14312 vport = pmb->vport;
14313
14314 /* Reset heartbeat timer */
14315 phba->last_completion_time = jiffies;
14316 del_timer(&phba->sli.mbox_tmo);
14317
14318 /* Move mbox data to caller's mailbox region, do endian swapping */
14319 if (pmb->mbox_cmpl && mbox)
14320 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
14321
14322 /*
14323 * For mcqe errors, conditionally move a modified error code to
14324 * the mbox so that the error will not be missed.
14325 */
14326 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
14327 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
14328 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
14329 bf_set(lpfc_mqe_status, mqe,
14330 (LPFC_MBX_ERROR_RANGE | mcqe_status));
14331 }
14332 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
14333 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
14334 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
14335 "MBOX dflt rpi: status:x%x rpi:x%x",
14336 mcqe_status,
14337 pmbox->un.varWords[0], 0);
14338 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
14339 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
14340 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
14341
14342 /* Reg_LOGIN of dflt RPI was successful. Mark the
14343 * node as having an UNREG_LOGIN in progress to stop
14344 * an unsolicited PLOGI from the same NPortId from
14345 * starting another mailbox transaction.
14346 */
14347 spin_lock_irqsave(&ndlp->lock, iflags);
14348 ndlp->nlp_flag |= NLP_UNREG_INP;
14349 spin_unlock_irqrestore(&ndlp->lock, iflags);
14350 lpfc_unreg_login(phba, vport->vpi,
14351 pmbox->un.varWords[0], pmb);
14352 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
14353 pmb->ctx_buf = mp;
14354
14355 /* No reference taken here. This is a default
14356 * RPI reg/immediate unreg cycle. The reference was
14357 * taken in the reg rpi path and is released when
14358 * this mailbox completes.
14359 */
14360 pmb->ctx_ndlp = ndlp;
14361 pmb->vport = vport;
14362 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
14363 if (rc != MBX_BUSY)
14364 lpfc_printf_log(phba, KERN_ERR,
14365 LOG_TRACE_EVENT,
14366 "0385 rc should "
14367 "have been MBX_BUSY\n");
14368 if (rc != MBX_NOT_FINISHED)
14369 goto send_current_mbox;
14370 }
14371 }
14372 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
14373 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
14374 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
14375
14376 /* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. */
14377 if (pmbox->mbxCommand == MBX_HEARTBEAT) {
14378 spin_lock_irqsave(&phba->hbalock, iflags);
14379 /* Release the mailbox command posting token */
14380 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
14381 phba->sli.mbox_active = NULL;
14382 if (bf_get(lpfc_trailer_consumed, mcqe))
14383 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14384 spin_unlock_irqrestore(&phba->hbalock, iflags);
14385
14386 /* Post the next mbox command, if there is one */
14387 lpfc_sli4_post_async_mbox(phba);
14388
14389 /* Process cmpl now */
14390 if (pmb->mbox_cmpl)
14391 pmb->mbox_cmpl(phba, pmb);
14392 return false;
14393 }
14394
14395 /* There is mailbox completion work to queue to the worker thread */
14396 spin_lock_irqsave(&phba->hbalock, iflags);
14397 __lpfc_mbox_cmpl_put(phba, pmb);
14398 phba->work_ha |= HA_MBATT;
14399 spin_unlock_irqrestore(&phba->hbalock, iflags);
14400 workposted = true;
14401
14402 send_current_mbox:
14403 spin_lock_irqsave(&phba->hbalock, iflags);
14404 /* Release the mailbox command posting token */
14405 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
14406 /* Setting active mailbox pointer need to be in sync to flag clear */
14407 phba->sli.mbox_active = NULL;
14408 if (bf_get(lpfc_trailer_consumed, mcqe))
14409 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14410 spin_unlock_irqrestore(&phba->hbalock, iflags);
14411 /* Wake up worker thread to post the next pending mailbox command */
14412 lpfc_worker_wake_up(phba);
14413 return workposted;
14414
14415 out_no_mqe_complete:
14416 spin_lock_irqsave(&phba->hbalock, iflags);
14417 if (bf_get(lpfc_trailer_consumed, mcqe))
14418 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14419 spin_unlock_irqrestore(&phba->hbalock, iflags);
14420 return false;
14421 }
14422
14423 /**
14424 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
14425 * @phba: Pointer to HBA context object.
14426 * @cq: Pointer to associated CQ
14427 * @cqe: Pointer to mailbox completion queue entry.
14428 *
14429 * This routine processes a mailbox completion queue entry; it invokes the
14430 * proper mailbox completion or asynchronous event handling routine
14431 * according to the MCQE's async bit.
14432 *
14433 * Return: true if work posted to worker thread, otherwise false.
14434 **/
14435 static bool
14436 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14437 struct lpfc_cqe *cqe)
14438 {
14439 struct lpfc_mcqe mcqe;
14440 bool workposted;
14441
14442 cq->CQ_mbox++;
14443
14444 /* Copy the mailbox MCQE and convert endian order as needed */
14445 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
14446
14447 /* Invoke the proper event handling routine */
14448 if (!bf_get(lpfc_trailer_async, &mcqe))
14449 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
14450 else
14451 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
14452 return workposted;
14453 }
14454
14455 /**
14456 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
14457 * @phba: Pointer to HBA context object.
14458 * @cq: Pointer to associated CQ
14459 * @wcqe: Pointer to work-queue completion queue entry.
14460 *
14461 * This routine handles an ELS work-queue completion event.
14462 *
14463 * Return: true if work posted to worker thread, otherwise false.
14464 **/
14465 static bool
14466 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14467 struct lpfc_wcqe_complete *wcqe)
14468 {
14469 struct lpfc_iocbq *irspiocbq;
14470 unsigned long iflags;
14471 struct lpfc_sli_ring *pring = cq->pring;
14472 int txq_cnt = 0;
14473 int txcmplq_cnt = 0;
14474
14475 /* Check for response status */
14476 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14477 /* Log the error status */
14478 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14479 "0357 ELS CQE error: status=x%x: "
14480 "CQE: %08x %08x %08x %08x\n",
14481 bf_get(lpfc_wcqe_c_status, wcqe),
14482 wcqe->word0, wcqe->total_data_placed,
14483 wcqe->parameter, wcqe->word3);
14484 }
14485
14486 /* Get an irspiocbq for later ELS response processing use */
14487 irspiocbq = lpfc_sli_get_iocbq(phba);
14488 if (!irspiocbq) {
14489 if (!list_empty(&pring->txq))
14490 txq_cnt++;
14491 if (!list_empty(&pring->txcmplq))
14492 txcmplq_cnt++;
14493 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14494 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
14495 "els_txcmplq_cnt=%d\n",
14496 txq_cnt, phba->iocb_cnt,
14497 txcmplq_cnt);
14498 return false;
14499 }
14500
14501 /* Save off the slow-path queue event for work thread to process */
14502 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
14503 spin_lock_irqsave(&phba->hbalock, iflags);
14504 list_add_tail(&irspiocbq->cq_event.list,
14505 &phba->sli4_hba.sp_queue_event);
14506 phba->hba_flag |= HBA_SP_QUEUE_EVT;
14507 spin_unlock_irqrestore(&phba->hbalock, iflags);
14508
14509 return true;
14510 }
14511
14512 /**
14513 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
14514 * @phba: Pointer to HBA context object.
14515 * @wcqe: Pointer to work-queue completion queue entry.
14516 *
14517 * This routine handles slow-path WQ entry consumed event by invoking the
14518 * proper WQ release routine to the slow-path WQ.
14519 **/
14520 static void
14521 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
14522 struct lpfc_wcqe_release *wcqe)
14523 {
14524 /* sanity check on queue memory */
14525 if (unlikely(!phba->sli4_hba.els_wq))
14526 return;
14527 /* Check for the slow-path ELS work queue */
14528 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
14529 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
14530 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
14531 else
14532 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14533 "2579 Slow-path wqe consume event carries "
14534 				"mismatched qid: wcqe-qid=x%x, sp-qid=x%x\n",
14535 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
14536 phba->sli4_hba.els_wq->queue_id);
14537 }
14538
14539 /**
14540 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
14541 * @phba: Pointer to HBA context object.
14542 * @cq: Pointer to a WQ completion queue.
14543 * @wcqe: Pointer to work-queue completion queue entry.
14544 *
14545 * This routine handles an XRI abort event.
14546 *
14547 * Return: true if work posted to worker thread, otherwise false.
14548 **/
14549 static bool
14550 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
14551 struct lpfc_queue *cq,
14552 struct sli4_wcqe_xri_aborted *wcqe)
14553 {
14554 bool workposted = false;
14555 struct lpfc_cq_event *cq_event;
14556 unsigned long iflags;
14557
14558 switch (cq->subtype) {
14559 case LPFC_IO:
14560 lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
14561 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14562 /* Notify aborted XRI for NVME work queue */
14563 if (phba->nvmet_support)
14564 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
14565 }
14566 workposted = false;
14567 break;
14568 case LPFC_NVME_LS: /* NVME LS uses ELS resources */
14569 case LPFC_ELS:
14570 cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
14571 if (!cq_event) {
14572 workposted = false;
14573 break;
14574 }
14575 cq_event->hdwq = cq->hdwq;
14576 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
14577 iflags);
14578 list_add_tail(&cq_event->list,
14579 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
14580 /* Set the els xri abort event flag */
14581 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
14582 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
14583 iflags);
14584 workposted = true;
14585 break;
14586 default:
14587 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14588 "0603 Invalid CQ subtype %d: "
14589 "%08x %08x %08x %08x\n",
14590 cq->subtype, wcqe->word0, wcqe->parameter,
14591 wcqe->word2, wcqe->word3);
14592 workposted = false;
14593 break;
14594 }
14595 return workposted;
14596 }
14597
14598 #define FC_RCTL_MDS_DIAGS 0xF4
14599
14600 /**
14601 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
14602 * @phba: Pointer to HBA context object.
14603 * @rcqe: Pointer to receive-queue completion queue entry.
14604 *
14605 * This routine processes a receive-queue completion queue entry.
14606 *
14607 * Return: true if work posted to worker thread, otherwise false.
14608 **/
14609 static bool
14610 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
14611 {
14612 bool workposted = false;
14613 struct fc_frame_header *fc_hdr;
14614 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
14615 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
14616 struct lpfc_nvmet_tgtport *tgtp;
14617 struct hbq_dmabuf *dma_buf;
14618 uint32_t status, rq_id;
14619 unsigned long iflags;
14620
14621 /* sanity check on queue memory */
14622 if (unlikely(!hrq) || unlikely(!drq))
14623 return workposted;
14624
14625 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14626 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14627 else
14628 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14629 if (rq_id != hrq->queue_id)
14630 goto out;
14631
14632 status = bf_get(lpfc_rcqe_status, rcqe);
14633 switch (status) {
14634 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14635 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14636 "2537 Receive Frame Truncated!!\n");
14637 fallthrough;
14638 case FC_STATUS_RQ_SUCCESS:
14639 spin_lock_irqsave(&phba->hbalock, iflags);
14640 lpfc_sli4_rq_release(hrq, drq);
14641 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
14642 if (!dma_buf) {
14643 hrq->RQ_no_buf_found++;
14644 spin_unlock_irqrestore(&phba->hbalock, iflags);
14645 goto out;
14646 }
14647 hrq->RQ_rcv_buf++;
14648 hrq->RQ_buf_posted--;
14649 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
14650
14651 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14652
14653 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
14654 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
14655 spin_unlock_irqrestore(&phba->hbalock, iflags);
14656 /* Handle MDS Loopback frames */
14657 if (!(phba->pport->load_flag & FC_UNLOADING))
14658 lpfc_sli4_handle_mds_loopback(phba->pport,
14659 dma_buf);
14660 else
14661 lpfc_in_buf_free(phba, &dma_buf->dbuf);
14662 break;
14663 }
14664
14665 /* save off the frame for the work thread to process */
14666 list_add_tail(&dma_buf->cq_event.list,
14667 &phba->sli4_hba.sp_queue_event);
14668 /* Frame received */
14669 phba->hba_flag |= HBA_SP_QUEUE_EVT;
14670 spin_unlock_irqrestore(&phba->hbalock, iflags);
14671 workposted = true;
14672 break;
14673 case FC_STATUS_INSUFF_BUF_FRM_DISC:
14674 if (phba->nvmet_support) {
14675 tgtp = phba->targetport->private;
14676 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14677 "6402 RQE Error x%x, posted %d err_cnt "
14678 "%d: %x %x %x\n",
14679 status, hrq->RQ_buf_posted,
14680 hrq->RQ_no_posted_buf,
14681 atomic_read(&tgtp->rcv_fcp_cmd_in),
14682 atomic_read(&tgtp->rcv_fcp_cmd_out),
14683 atomic_read(&tgtp->xmt_fcp_release));
14684 }
14685 fallthrough;
14686
14687 case FC_STATUS_INSUFF_BUF_NEED_BUF:
14688 hrq->RQ_no_posted_buf++;
14689 /* Post more buffers if possible */
14690 spin_lock_irqsave(&phba->hbalock, iflags);
14691 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
14692 spin_unlock_irqrestore(&phba->hbalock, iflags);
14693 workposted = true;
14694 break;
14695 case FC_STATUS_RQ_DMA_FAILURE:
14696 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14697 "2564 RQE DMA Error x%x, x%08x x%08x x%08x "
14698 "x%08x\n",
14699 status, rcqe->word0, rcqe->word1,
14700 rcqe->word2, rcqe->word3);
14701
14702 /* If IV set, no further recovery */
14703 if (bf_get(lpfc_rcqe_iv, rcqe))
14704 break;
14705
14706 /* recycle consumed resource */
14707 spin_lock_irqsave(&phba->hbalock, iflags);
14708 lpfc_sli4_rq_release(hrq, drq);
14709 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
14710 if (!dma_buf) {
14711 hrq->RQ_no_buf_found++;
14712 spin_unlock_irqrestore(&phba->hbalock, iflags);
14713 break;
14714 }
14715 hrq->RQ_rcv_buf++;
14716 hrq->RQ_buf_posted--;
14717 spin_unlock_irqrestore(&phba->hbalock, iflags);
14718 lpfc_in_buf_free(phba, &dma_buf->dbuf);
14719 break;
14720 default:
14721 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14722 "2565 Unexpected RQE Status x%x, w0-3 x%08x "
14723 "x%08x x%08x x%08x\n",
14724 status, rcqe->word0, rcqe->word1,
14725 rcqe->word2, rcqe->word3);
14726 break;
14727 }
14728 out:
14729 return workposted;
14730 }
14731
14732 /**
14733 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
14734 * @phba: Pointer to HBA context object.
14735 * @cq: Pointer to the completion queue.
14736 * @cqe: Pointer to a completion queue entry.
14737 *
14738 * This routine processes a slow-path work-queue or receive-queue completion
14739 * queue entry.
14740 *
14741 * Return: true if work posted to worker thread, otherwise false.
14742 **/
14743 static bool
14744 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14745 struct lpfc_cqe *cqe)
14746 {
14747 struct lpfc_cqe cqevt;
14748 bool workposted = false;
14749
14750 /* Copy the work queue CQE and convert endian order if needed */
14751 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
14752
14753 /* Check and process for different type of WCQE and dispatch */
14754 switch (bf_get(lpfc_cqe_code, &cqevt)) {
14755 case CQE_CODE_COMPL_WQE:
14756 /* Process the WQ/RQ complete event */
14757 phba->last_completion_time = jiffies;
14758 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
14759 (struct lpfc_wcqe_complete *)&cqevt);
14760 break;
14761 case CQE_CODE_RELEASE_WQE:
14762 /* Process the WQ release event */
14763 lpfc_sli4_sp_handle_rel_wcqe(phba,
14764 (struct lpfc_wcqe_release *)&cqevt);
14765 break;
14766 case CQE_CODE_XRI_ABORTED:
14767 /* Process the WQ XRI abort event */
14768 phba->last_completion_time = jiffies;
14769 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14770 (struct sli4_wcqe_xri_aborted *)&cqevt);
14771 break;
14772 case CQE_CODE_RECEIVE:
14773 case CQE_CODE_RECEIVE_V1:
14774 /* Process the RQ event */
14775 phba->last_completion_time = jiffies;
14776 workposted = lpfc_sli4_sp_handle_rcqe(phba,
14777 (struct lpfc_rcqe *)&cqevt);
14778 break;
14779 default:
14780 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14781 "0388 Not a valid WCQE code: x%x\n",
14782 bf_get(lpfc_cqe_code, &cqevt));
14783 break;
14784 }
14785 return workposted;
14786 }
14787
14788 /**
14789 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
14790 * @phba: Pointer to HBA context object.
14791 * @eqe: Pointer to fast-path event queue entry.
14792 * @speq: Pointer to slow-path event queue.
14793 *
14794  * This routine processes an event queue entry from the slow-path event queue.
14795  * It looks up the completion queue indicated by the entry's resource id; if
14796  * no matching completion queue exists, an error is logged and the routine
14797  * returns. Otherwise, it saves the EQ association and schedules queue work
14798  * to process all the entries on that completion queue and rearm it before
14799  * returning.
14800 *
14801 **/
14802 static void
14803 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
14804 struct lpfc_queue *speq)
14805 {
14806 struct lpfc_queue *cq = NULL, *childq;
14807 uint16_t cqid;
14808 int ret = 0;
14809
14810 /* Get the reference to the corresponding CQ */
14811 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14812
14813 list_for_each_entry(childq, &speq->child_list, list) {
14814 if (childq->queue_id == cqid) {
14815 cq = childq;
14816 break;
14817 }
14818 }
14819 if (unlikely(!cq)) {
14820 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
14821 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14822 "0365 Slow-path CQ identifier "
14823 "(%d) does not exist\n", cqid);
14824 return;
14825 }
14826
14827 /* Save EQ associated with this CQ */
14828 cq->assoc_qp = speq;
14829
14830 if (is_kdump_kernel())
14831 ret = queue_work(phba->wq, &cq->spwork);
14832 else
14833 ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
14834
14835 if (!ret)
14836 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14837 "0390 Cannot schedule queue work "
14838 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14839 cqid, cq->queue_id, raw_smp_processor_id());
14840 }
14841
14842 /**
14843 * __lpfc_sli4_process_cq - Process elements of a CQ
14844 * @phba: Pointer to HBA context object.
14845 * @cq: Pointer to CQ to be processed
14846 * @handler: Routine to process each cqe
14847 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
14848 *
14849 * This routine processes completion queue entries in a CQ. While a valid
14850 * queue element is found, the handler is called. During processing checks
14851 * are made for periodic doorbell writes to let the hardware know of
14852 * element consumption.
14853 *
14854 * If the max limit on cqes to process is hit, or there are no more valid
14855 * entries, the loop stops. If we processed a sufficient number of elements,
14856 * meaning there is sufficient load, rather than rearming and generating
14857 * another interrupt, a cq rescheduling delay will be set. A delay of 0
14858 * indicates no rescheduling.
14859 *
14860  * Return: true if work posted to worker thread, otherwise false.
14861 **/
14862 static bool
14863 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
14864 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
14865 struct lpfc_cqe *), unsigned long *delay)
14866 {
14867 struct lpfc_cqe *cqe;
14868 bool workposted = false;
14869 int count = 0, consumed = 0;
14870 bool arm = true;
14871
14872 /* default - no reschedule */
14873 *delay = 0;
14874
14875 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
14876 goto rearm_and_exit;
14877
14878 /* Process all the entries to the CQ */
14879 cq->q_flag = 0;
14880 cqe = lpfc_sli4_cq_get(cq);
14881 while (cqe) {
14882 workposted |= handler(phba, cq, cqe);
14883 __lpfc_sli4_consume_cqe(phba, cq, cqe);
14884
14885 consumed++;
14886 if (!(++count % cq->max_proc_limit))
14887 break;
14888
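		/* Acknowledge consumed CQEs in batches: every notify_interval
		 * entries, ring the CQ doorbell without rearming so the port
		 * can recycle CQE slots while this loop keeps draining.
		 */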
14889 if (!(count % cq->notify_interval)) {
14890 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14891 LPFC_QUEUE_NOARM);
14892 consumed = 0;
14893 cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
14894 }
14895
14896 if (count == LPFC_NVMET_CQ_NOTIFY)
14897 cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
14898
14899 cqe = lpfc_sli4_cq_get(cq);
14900 }
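	/* Under sustained load (count at or above the poll threshold), skip
	 * rearming and request a 1-jiffy rescheduling delay instead of taking
	 * another interrupt for the remaining work.
	 */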
14901 if (count >= phba->cfg_cq_poll_threshold) {
14902 *delay = 1;
14903 arm = false;
14904 }
14905
14906 /* Track the max number of CQEs processed in 1 EQ */
14907 if (count > cq->CQ_max_cqe)
14908 cq->CQ_max_cqe = count;
14909
14910 cq->assoc_qp->EQ_cqe_cnt += count;
14911
14912 /* Catch the no cq entry condition */
14913 if (unlikely(count == 0))
14914 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14915 "0369 No entry from completion queue "
14916 "qid=%d\n", cq->queue_id);
14917
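	/* Release the single-consumer claim on the CQ; xchg() implies a full
	 * memory barrier, so all CQE processing above is visible before
	 * another context can claim the queue.
	 */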
14918 xchg(&cq->queue_claimed, 0);
14919
14920 rearm_and_exit:
14921 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14922 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
14923
14924 return workposted;
14925 }
14926
14927 /**
14928 * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
14929 * @cq: pointer to CQ to process
14930 *
14931 * This routine calls the cq processing routine with a handler specific
14932 * to the type of queue bound to it.
14933 *
14934 * The CQ routine returns two values: the first is the calling status,
14935 * which indicates whether work was queued to the background discovery
14936 * thread. If true, the routine should wakeup the discovery thread;
14937 * the second is the delay parameter. If non-zero, rather than rearming
14938 * the CQ and yet another interrupt, the CQ handler should be queued so
14939 * that it is processed in a subsequent polling action. The value of
14940 * the delay indicates when to reschedule it.
14941 **/
14942 static void
14943 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
14944 {
14945 struct lpfc_hba *phba = cq->phba;
14946 unsigned long delay;
14947 bool workposted = false;
14948 int ret = 0;
14949
14950 /* Process and rearm the CQ */
14951 switch (cq->type) {
14952 case LPFC_MCQ:
14953 workposted |= __lpfc_sli4_process_cq(phba, cq,
14954 lpfc_sli4_sp_handle_mcqe,
14955 &delay);
14956 break;
14957 case LPFC_WCQ:
14958 if (cq->subtype == LPFC_IO)
14959 workposted |= __lpfc_sli4_process_cq(phba, cq,
14960 lpfc_sli4_fp_handle_cqe,
14961 &delay);
14962 else
14963 workposted |= __lpfc_sli4_process_cq(phba, cq,
14964 lpfc_sli4_sp_handle_cqe,
14965 &delay);
14966 break;
14967 default:
14968 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14969 "0370 Invalid completion queue type (%d)\n",
14970 cq->type);
14971 return;
14972 }
14973
14974 if (delay) {
14975 if (is_kdump_kernel())
14976 ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
14977 delay);
14978 else
14979 ret = queue_delayed_work_on(cq->chann, phba->wq,
14980 &cq->sched_spwork, delay);
14981 if (!ret)
14982 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14983 "0394 Cannot schedule queue work "
14984 "for cqid=%d on CPU %d\n",
14985 cq->queue_id, cq->chann);
14986 }
14987
14988 /* wake up worker thread if there are works to be done */
14989 if (workposted)
14990 lpfc_worker_wake_up(phba);
14991 }
14992
14993 /**
14994 * lpfc_sli4_sp_process_cq - slow-path work handler when started by
14995 * interrupt
14996 * @work: pointer to work element
14997 *
14998  * Translates from the work element to the owning queue and calls the
14999  * slow-path handler.
14999 **/
15000 static void
15001 lpfc_sli4_sp_process_cq(struct work_struct *work)
15002 {
15003 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
15004
15005 __lpfc_sli4_sp_process_cq(cq);
15006 }
15007
15008 /**
15009 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
15010 * @work: pointer to work element
15011 *
15012  * Translates from the work element to the owning queue and calls the
15013  * slow-path handler.
15013 **/
15014 static void
15015 lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
15016 {
15017 struct lpfc_queue *cq = container_of(to_delayed_work(work),
15018 struct lpfc_queue, sched_spwork);
15019
15020 __lpfc_sli4_sp_process_cq(cq);
15021 }
15022
15023 /**
15024 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
15025 * @phba: Pointer to HBA context object.
15026 * @cq: Pointer to associated CQ
15027 * @wcqe: Pointer to work-queue completion queue entry.
15028 *
15029  * This routine processes a fast-path work-queue completion entry from the
15030  * fast-path event queue for an FCP command response completion.
15031 **/
15032 static void
15033 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15034 struct lpfc_wcqe_complete *wcqe)
15035 {
15036 struct lpfc_sli_ring *pring = cq->pring;
15037 struct lpfc_iocbq *cmdiocbq;
15038 unsigned long iflags;
15039
15040 /* Check for response status */
15041 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
15042 /* If resource errors reported from HBA, reduce queue
15043 * depth of the SCSI device.
15044 */
15045 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
15046 IOSTAT_LOCAL_REJECT)) &&
15047 ((wcqe->parameter & IOERR_PARAM_MASK) ==
15048 IOERR_NO_RESOURCES))
15049 phba->lpfc_rampdown_queue_depth(phba);
15050
15051 /* Log the cmpl status */
15052 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
15053 "0373 FCP CQE cmpl: status=x%x: "
15054 "CQE: %08x %08x %08x %08x\n",
15055 bf_get(lpfc_wcqe_c_status, wcqe),
15056 wcqe->word0, wcqe->total_data_placed,
15057 wcqe->parameter, wcqe->word3);
15058 }
15059
15060 /* Look up the FCP command IOCB and create pseudo response IOCB */
15061 spin_lock_irqsave(&pring->ring_lock, iflags);
15062 pring->stats.iocb_event++;
15063 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
15064 bf_get(lpfc_wcqe_c_request_tag, wcqe));
15065 spin_unlock_irqrestore(&pring->ring_lock, iflags);
15066 if (unlikely(!cmdiocbq)) {
15067 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15068 "0374 FCP complete with no corresponding "
15069 "cmdiocb: iotag (%d)\n",
15070 bf_get(lpfc_wcqe_c_request_tag, wcqe));
15071 return;
15072 }
15073 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
15074 cmdiocbq->isr_timestamp = cq->isr_timestamp;
15075 #endif
15076 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
15077 spin_lock_irqsave(&phba->hbalock, iflags);
15078 cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
15079 spin_unlock_irqrestore(&phba->hbalock, iflags);
15080 }
15081
15082 if (cmdiocbq->cmd_cmpl) {
15083 /* For FCP the flag is cleared in cmd_cmpl */
15084 if (!(cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
15085 cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) {
15086 spin_lock_irqsave(&phba->hbalock, iflags);
15087 cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
15088 spin_unlock_irqrestore(&phba->hbalock, iflags);
15089 }
15090
15091 /* Pass the cmd_iocb and the wcqe to the upper layer */
15092 memcpy(&cmdiocbq->wcqe_cmpl, wcqe,
15093 sizeof(struct lpfc_wcqe_complete));
15094 cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq);
15095 } else {
15096 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15097 "0375 FCP cmdiocb not callback function "
15098 "iotag: (%d)\n",
15099 bf_get(lpfc_wcqe_c_request_tag, wcqe));
15100 }
15101 }
15102
15103 /**
15104 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
15105 * @phba: Pointer to HBA context object.
15106 * @cq: Pointer to completion queue.
15107 * @wcqe: Pointer to work-queue completion queue entry.
15108 *
15109  * This routine handles a fast-path WQ entry consumed event by invoking the
15110  * proper WQ release routine on the matching work queue.
15111 **/
15112 static void
15113 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15114 struct lpfc_wcqe_release *wcqe)
15115 {
15116 struct lpfc_queue *childwq;
15117 bool wqid_matched = false;
15118 uint16_t hba_wqid;
15119
15120 /* Check for fast-path FCP work queue release */
15121 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
15122 list_for_each_entry(childwq, &cq->child_list, list) {
15123 if (childwq->queue_id == hba_wqid) {
15124 lpfc_sli4_wq_release(childwq,
15125 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
15126 if (childwq->q_flag & HBA_NVMET_WQFULL)
15127 lpfc_nvmet_wqfull_process(phba, childwq);
15128 wqid_matched = true;
15129 break;
15130 }
15131 }
15132 /* Report warning log message if no match found */
15133 	if (!wqid_matched)
15134 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15135 "2580 Fast-path wqe consume event carries "
15136 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
15137 }
15138
15139 /**
15140 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
15141 * @phba: Pointer to HBA context object.
15142 * @cq: Pointer to completion queue.
15143 * @rcqe: Pointer to receive-queue completion queue entry.
15144 *
15145  * This routine processes a receive-queue completion queue entry.
15146 *
15147 * Return: true if work posted to worker thread, otherwise false.
15148 **/
15149 static bool
15150 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15151 struct lpfc_rcqe *rcqe)
15152 {
15153 bool workposted = false;
15154 struct lpfc_queue *hrq;
15155 struct lpfc_queue *drq;
15156 struct rqb_dmabuf *dma_buf;
15157 struct fc_frame_header *fc_hdr;
15158 struct lpfc_nvmet_tgtport *tgtp;
15159 uint32_t status, rq_id;
15160 unsigned long iflags;
15161 uint32_t fctl, idx;
15162
15163 if ((phba->nvmet_support == 0) ||
15164 (phba->sli4_hba.nvmet_cqset == NULL))
15165 return workposted;
15166
15167 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
15168 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
15169 drq = phba->sli4_hba.nvmet_mrq_data[idx];
15170
15171 /* sanity check on queue memory */
15172 if (unlikely(!hrq) || unlikely(!drq))
15173 return workposted;
15174
15175 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
15176 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
15177 else
15178 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
15179
15180 if ((phba->nvmet_support == 0) ||
15181 (rq_id != hrq->queue_id))
15182 return workposted;
15183
15184 status = bf_get(lpfc_rcqe_status, rcqe);
15185 switch (status) {
15186 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
15187 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15188 "6126 Receive Frame Truncated!!\n");
15189 fallthrough;
15190 case FC_STATUS_RQ_SUCCESS:
15191 spin_lock_irqsave(&phba->hbalock, iflags);
15192 lpfc_sli4_rq_release(hrq, drq);
15193 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
15194 if (!dma_buf) {
15195 hrq->RQ_no_buf_found++;
15196 spin_unlock_irqrestore(&phba->hbalock, iflags);
15197 goto out;
15198 }
15199 spin_unlock_irqrestore(&phba->hbalock, iflags);
15200 hrq->RQ_rcv_buf++;
15201 hrq->RQ_buf_posted--;
15202 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
15203
15204 /* Just some basic sanity checks on FCP Command frame */
15205 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
15206 fc_hdr->fh_f_ctl[1] << 8 |
15207 fc_hdr->fh_f_ctl[2]);
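		/* Require a complete single-sequence command frame: FIRST_SEQ,
		 * END_SEQ and SEQ_INIT must all be set in F_CTL and the
		 * sequence count must be zero, else the frame is dropped.
		 */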
15208 if (((fctl &
15209 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
15210 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
15211 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
15212 goto drop;
15213
15214 if (fc_hdr->fh_type == FC_TYPE_FCP) {
15215 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
15216 lpfc_nvmet_unsol_fcp_event(
15217 phba, idx, dma_buf, cq->isr_timestamp,
15218 cq->q_flag & HBA_NVMET_CQ_NOTIFY);
15219 return false;
15220 }
15221 drop:
15222 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
15223 break;
15224 case FC_STATUS_INSUFF_BUF_FRM_DISC:
15225 if (phba->nvmet_support) {
15226 tgtp = phba->targetport->private;
15227 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15228 "6401 RQE Error x%x, posted %d err_cnt "
15229 "%d: %x %x %x\n",
15230 status, hrq->RQ_buf_posted,
15231 hrq->RQ_no_posted_buf,
15232 atomic_read(&tgtp->rcv_fcp_cmd_in),
15233 atomic_read(&tgtp->rcv_fcp_cmd_out),
15234 atomic_read(&tgtp->xmt_fcp_release));
15235 }
15236 fallthrough;
15237
15238 case FC_STATUS_INSUFF_BUF_NEED_BUF:
15239 hrq->RQ_no_posted_buf++;
15240 /* Post more buffers if possible */
15241 break;
15242 case FC_STATUS_RQ_DMA_FAILURE:
15243 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15244 "2575 RQE DMA Error x%x, x%08x x%08x x%08x "
15245 "x%08x\n",
15246 status, rcqe->word0, rcqe->word1,
15247 rcqe->word2, rcqe->word3);
15248
15249 /* If IV set, no further recovery */
15250 if (bf_get(lpfc_rcqe_iv, rcqe))
15251 break;
15252
15253 /* recycle consumed resource */
15254 spin_lock_irqsave(&phba->hbalock, iflags);
15255 lpfc_sli4_rq_release(hrq, drq);
15256 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
15257 if (!dma_buf) {
15258 hrq->RQ_no_buf_found++;
15259 spin_unlock_irqrestore(&phba->hbalock, iflags);
15260 break;
15261 }
15262 hrq->RQ_rcv_buf++;
15263 hrq->RQ_buf_posted--;
15264 spin_unlock_irqrestore(&phba->hbalock, iflags);
15265 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
15266 break;
15267 default:
15268 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15269 "2576 Unexpected RQE Status x%x, w0-3 x%08x "
15270 "x%08x x%08x x%08x\n",
15271 status, rcqe->word0, rcqe->word1,
15272 rcqe->word2, rcqe->word3);
15273 break;
15274 }
15275 out:
15276 return workposted;
15277 }
15278
15279 /**
15280 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
15281 * @phba: adapter with cq
15282 * @cq: Pointer to the completion queue.
15283 * @cqe: Pointer to fast-path completion queue entry.
15284 *
15285  * This routine processes a fast-path work-queue completion entry from the
15286  * fast-path event queue for an FCP command response completion.
15287 *
15288 * Return: true if work posted to worker thread, otherwise false.
15289 **/
15290 static bool
15291 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15292 struct lpfc_cqe *cqe)
15293 {
15294 struct lpfc_wcqe_release wcqe;
15295 bool workposted = false;
15296
15297 /* Copy the work queue CQE and convert endian order if needed */
15298 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
15299
15300 /* Check and process for different type of WCQE and dispatch */
15301 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
15302 case CQE_CODE_COMPL_WQE:
15303 case CQE_CODE_NVME_ERSP:
15304 cq->CQ_wq++;
15305 /* Process the WQ complete event */
15306 phba->last_completion_time = jiffies;
15307 if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
15308 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
15309 (struct lpfc_wcqe_complete *)&wcqe);
15310 break;
15311 case CQE_CODE_RELEASE_WQE:
15312 cq->CQ_release_wqe++;
15313 /* Process the WQ release event */
15314 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
15315 (struct lpfc_wcqe_release *)&wcqe);
15316 break;
15317 case CQE_CODE_XRI_ABORTED:
15318 cq->CQ_xri_aborted++;
15319 /* Process the WQ XRI abort event */
15320 phba->last_completion_time = jiffies;
15321 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
15322 (struct sli4_wcqe_xri_aborted *)&wcqe);
15323 break;
15324 case CQE_CODE_RECEIVE_V1:
15325 case CQE_CODE_RECEIVE:
15326 phba->last_completion_time = jiffies;
15327 if (cq->subtype == LPFC_NVMET) {
15328 workposted = lpfc_sli4_nvmet_handle_rcqe(
15329 phba, cq, (struct lpfc_rcqe *)&wcqe);
15330 }
15331 break;
15332 default:
15333 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15334 "0144 Not a valid CQE code: x%x\n",
15335 bf_get(lpfc_wcqe_c_code, &wcqe));
15336 break;
15337 }
15338 return workposted;
15339 }
15340
15341 /**
15342 * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
15343 * @cq: Pointer to CQ to be processed
15344 *
15345 * This routine calls the cq processing routine with the handler for
15346 * fast path CQEs.
15347 *
15348 * The CQ routine returns two values: the first is the calling status,
15349 * which indicates whether work was queued to the background discovery
15350 * thread. If true, the routine should wakeup the discovery thread;
15351 * the second is the delay parameter. If non-zero, rather than rearming
15352 * the CQ and yet another interrupt, the CQ handler should be queued so
15353 * that it is processed in a subsequent polling action. The value of
15354 * the delay indicates when to reschedule it.
15355 **/
15356 static void
15357 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
15358 {
15359 struct lpfc_hba *phba = cq->phba;
15360 unsigned long delay;
15361 bool workposted = false;
15362 int ret;
15363
15364 /* process and rearm the CQ */
15365 workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
15366 &delay);
15367
15368 if (delay) {
15369 if (is_kdump_kernel())
15370 ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
15371 delay);
15372 else
15373 ret = queue_delayed_work_on(cq->chann, phba->wq,
15374 &cq->sched_irqwork, delay);
15375 if (!ret)
15376 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15377 "0367 Cannot schedule queue work "
15378 "for cqid=%d on CPU %d\n",
15379 cq->queue_id, cq->chann);
15380 }
15381
15382 /* wake up worker thread if there are works to be done */
15383 if (workposted)
15384 lpfc_worker_wake_up(phba);
15385 }
15386
15387 /**
15388 * lpfc_sli4_hba_process_cq - fast-path work handler when started by
15389 * interrupt
15390 * @work: pointer to work element
15391 *
15392  * Translates from the work element to the owning queue and calls the
15393  * fast-path handler.
15393 **/
15394 static void
15395 lpfc_sli4_hba_process_cq(struct work_struct *work)
15396 {
15397 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
15398
15399 __lpfc_sli4_hba_process_cq(cq);
15400 }
15401
15402 /**
15403 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
15404 * @phba: Pointer to HBA context object.
15405 * @eq: Pointer to the queue structure.
15406 * @eqe: Pointer to fast-path event queue entry.
15407  * @poll_mode: execution mode for processing the cq (threaded irq or queue work).
15408  *
15409  * This routine processes an event queue entry from the fast-path event queue.
15410  * It checks the MajorCode and MinorCode to determine whether this is a
15411  * completion event on a completion queue; if not, an error is logged and the
15412  * routine returns. Otherwise, it looks up the corresponding completion queue
15413  * and, per @poll_mode, either processes its entries inline or schedules
15414  * queue work to process and rearm the completion queue.
15415 **/
15416 static void
15417 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
15418 struct lpfc_eqe *eqe, enum lpfc_poll_mode poll_mode)
15419 {
15420 struct lpfc_queue *cq = NULL;
15421 uint32_t qidx = eq->hdwq;
15422 uint16_t cqid, id;
15423 int ret;
15424
15425 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
15426 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15427 "0366 Not a valid completion "
15428 "event: majorcode=x%x, minorcode=x%x\n",
15429 bf_get_le32(lpfc_eqe_major_code, eqe),
15430 bf_get_le32(lpfc_eqe_minor_code, eqe));
15431 return;
15432 }
15433
15434 /* Get the reference to the corresponding CQ */
15435 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
15436
15437 /* Use the fast lookup method first */
15438 if (cqid <= phba->sli4_hba.cq_max) {
15439 cq = phba->sli4_hba.cq_lookup[cqid];
15440 if (cq)
15441 goto work_cq;
15442 }
15443
15444 /* Next check for NVMET completion */
15445 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
15446 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
15447 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
15448 /* Process NVMET unsol rcv */
15449 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
15450 goto process_cq;
15451 }
15452 }
15453
15454 if (phba->sli4_hba.nvmels_cq &&
15455 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
15456 /* Process NVME unsol rcv */
15457 cq = phba->sli4_hba.nvmels_cq;
15458 }
15459
15460 /* Otherwise this is a Slow path event */
15461 if (cq == NULL) {
15462 lpfc_sli4_sp_handle_eqe(phba, eqe,
15463 phba->sli4_hba.hdwq[qidx].hba_eq);
15464 return;
15465 }
15466
15467 process_cq:
15468 if (unlikely(cqid != cq->queue_id)) {
15469 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15470 "0368 Miss-matched fast-path completion "
15471 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
15472 cqid, cq->queue_id);
15473 return;
15474 }
15475
15476 work_cq:
15477 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
15478 if (phba->ktime_on)
15479 cq->isr_timestamp = ktime_get_ns();
15480 else
15481 cq->isr_timestamp = 0;
15482 #endif
15483
15484 switch (poll_mode) {
15485 case LPFC_THREADED_IRQ:
15486 __lpfc_sli4_hba_process_cq(cq);
15487 break;
15488 case LPFC_QUEUE_WORK:
15489 default:
15490 if (is_kdump_kernel())
15491 ret = queue_work(phba->wq, &cq->irqwork);
15492 else
15493 ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
15494 if (!ret)
15495 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15496 "0383 Cannot schedule queue work "
15497 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
15498 cqid, cq->queue_id,
15499 raw_smp_processor_id());
15500 break;
15501 }
15502 }
15503
15504 /**
15505 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
15506 * @work: pointer to work element
15507 *
15508  * Translates from the work element to the owning queue and calls the
15509  * fast-path handler.
15509 **/
15510 static void
15511 lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
15512 {
15513 struct lpfc_queue *cq = container_of(to_delayed_work(work),
15514 struct lpfc_queue, sched_irqwork);
15515
15516 __lpfc_sli4_hba_process_cq(cq);
15517 }
15518
15519 /**
15520 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
15521 * @irq: Interrupt number.
15522 * @dev_id: The device context pointer.
15523 *
15524 * This function is directly called from the PCI layer as an interrupt
15525 * service routine when device with SLI-4 interface spec is enabled with
15526 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
15527 * ring event in the HBA. However, when the device is enabled with either
15528 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
15529 * device-level interrupt handler. When the PCI slot is in error recovery
15530 * or the HBA is undergoing initialization, the interrupt handler will not
15531  * process the interrupt. SCSI FCP fast-path ring events are handled in
15532  * interrupt context. This function is called without any lock held; it
15533  * takes the hbalock as needed to access and update SLI data structures.
15534  * Note that the FCP EQs map one-to-one to FCP CQs, so the FCP EQ index
15535  * equals the FCP CQ index.
15536 *
15537 * The link attention and ELS ring attention events are handled
15538 * by the worker thread. The interrupt handler signals the worker thread
15539 * and returns for these events. This function is called without any lock
15540 * held. It gets the hbalock to access and update SLI data structures.
15541 *
15542 * This function returns IRQ_HANDLED when interrupt is handled, IRQ_WAKE_THREAD
15543 * when interrupt is scheduled to be handled from a threaded irq context, or
15544 * else returns IRQ_NONE.
15545 **/
15546 irqreturn_t
15547 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
15548 {
15549 struct lpfc_hba *phba;
15550 struct lpfc_hba_eq_hdl *hba_eq_hdl;
15551 struct lpfc_queue *fpeq;
15552 unsigned long iflag;
15553 int hba_eqidx;
15554 int ecount = 0;
15555 struct lpfc_eq_intr_info *eqi;
15556
15557 /* Get the driver's phba structure from the dev_id */
15558 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
15559 phba = hba_eq_hdl->phba;
15560 hba_eqidx = hba_eq_hdl->idx;
15561
15562 if (unlikely(!phba))
15563 return IRQ_NONE;
15564 if (unlikely(!phba->sli4_hba.hdwq))
15565 return IRQ_NONE;
15566
15567 /* Get to the EQ struct associated with this vector */
15568 fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
15569 if (unlikely(!fpeq))
15570 return IRQ_NONE;
15571
15572 /* Check device state for handling interrupt */
15573 if (unlikely(lpfc_intr_state_check(phba))) {
15574 /* Check again for link_state with lock held */
15575 spin_lock_irqsave(&phba->hbalock, iflag);
15576 if (phba->link_state < LPFC_LINK_DOWN)
15577 /* Flush, clear interrupt, and rearm the EQ */
15578 lpfc_sli4_eqcq_flush(phba, fpeq);
15579 spin_unlock_irqrestore(&phba->hbalock, iflag);
15580 return IRQ_NONE;
15581 }
15582
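	/* Dispatch on the EQ's poll mode: in threaded-IRQ mode, defer to the
	 * irq thread (IRQ_WAKE_THREAD) only while congestion management is
	 * off; otherwise fall through and process the EQ inline as in the
	 * queue-work path.
	 */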
15583 switch (fpeq->poll_mode) {
15584 case LPFC_THREADED_IRQ:
15585 /* CGN mgmt is mutually exclusive from irq processing */
15586 if (phba->cmf_active_mode == LPFC_CFG_OFF)
15587 return IRQ_WAKE_THREAD;
15588 fallthrough;
15589 case LPFC_QUEUE_WORK:
15590 default:
15591 eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
15592 eqi->icnt++;
15593
15594 fpeq->last_cpu = raw_smp_processor_id();
15595
15596 if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
15597 fpeq->q_flag & HBA_EQ_DELAY_CHK &&
15598 phba->cfg_auto_imax &&
15599 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
15600 phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
15601 lpfc_sli4_mod_hba_eq_delay(phba, fpeq,
15602 LPFC_MAX_AUTO_EQ_DELAY);
15603
15604 /* process and rearm the EQ */
15605 ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
15606 LPFC_QUEUE_WORK);
15607
15608 if (unlikely(ecount == 0)) {
15609 fpeq->EQ_no_entry++;
15610 if (phba->intr_type == MSIX)
15611 			/* Dedicated MSI-X vector, so an empty EQ is unexpected; log it */
15612 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15613 "0358 MSI-X interrupt with no EQE\n");
15614 else
15615 			/* INTx/MSI may be shared with another device; not our interrupt */
15616 return IRQ_NONE;
15617 }
15618 }
15619
15620 return IRQ_HANDLED;
15621 } /* lpfc_sli4_hba_intr_handler */
15622
15623 /**
15624 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
15625 * @irq: Interrupt number.
15626 * @dev_id: The device context pointer.
15627 *
15628 * This function is the device-level interrupt handler to device with SLI-4
15629 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
15630 * interrupt mode is enabled and there is an event in the HBA which requires
15631 * driver attention. This function invokes the slow-path interrupt attention
15632 * handling function and fast-path interrupt attention handling function in
15633 * turn to process the relevant HBA attention events. This function is called
15634 * without any lock held. It gets the hbalock to access and update SLI data
15635 * structures.
15636 *
15637 * This function returns IRQ_HANDLED when interrupt is handled, else it
15638 * returns IRQ_NONE.
15639 **/
15640 irqreturn_t
15641 lpfc_sli4_intr_handler(int irq, void *dev_id)
15642 {
15643 struct lpfc_hba *phba;
15644 irqreturn_t hba_irq_rc;
15645 bool hba_handled = false;
15646 int qidx;
15647
15648 /* Get the driver's phba structure from the dev_id */
15649 phba = (struct lpfc_hba *)dev_id;
15650
15651 if (unlikely(!phba))
15652 return IRQ_NONE;
15653
15654 /*
15655 * Invoke fast-path host attention interrupt handling as appropriate.
15656 */
15657 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
15658 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
15659 &phba->sli4_hba.hba_eq_hdl[qidx]);
15660 if (hba_irq_rc == IRQ_HANDLED)
15661 			hba_handled = true;
15662 }
15663
15664 	return hba_handled ? IRQ_HANDLED : IRQ_NONE;
15665 } /* lpfc_sli4_intr_handler */
15666
15667 void lpfc_sli4_poll_hbtimer(struct timer_list *t)
15668 {
15669 struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
15670 struct lpfc_queue *eq;
15671
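	/* Walk the poll list under RCU and service each EQ; keep the
	 * heartbeat timer running for as long as any EQ remains in polled
	 * mode.
	 */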
15672 rcu_read_lock();
15673
15674 list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
15675 lpfc_sli4_poll_eq(eq);
15676 if (!list_empty(&phba->poll_list))
15677 mod_timer(&phba->cpuhp_poll_timer,
15678 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
15679
15680 rcu_read_unlock();
15681 }
15682
15683 static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
15684 {
15685 struct lpfc_hba *phba = eq->phba;
15686
15687 /* kickstart slowpath processing if needed */
15688 if (list_empty(&phba->poll_list))
15689 mod_timer(&phba->cpuhp_poll_timer,
15690 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
15691
15692 list_add_rcu(&eq->_poll_list, &phba->poll_list);
15693 synchronize_rcu();
15694 }
15695
15696 static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
15697 {
15698 struct lpfc_hba *phba = eq->phba;
15699
15700 	/* Disable slow-path (timer) polling for this eq; the interrupt
15701 	 * path will kick-start it by re-arming the eq ASAP.
15702 	 */
15703 list_del_rcu(&eq->_poll_list);
15704 synchronize_rcu();
15705
15706 if (list_empty(&phba->poll_list))
15707 del_timer_sync(&phba->cpuhp_poll_timer);
15708 }
15709
15710 void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
15711 {
15712 struct lpfc_queue *eq, *next;
15713
15714 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
15715 list_del(&eq->_poll_list);
15716
15717 INIT_LIST_HEAD(&phba->poll_list);
15718 synchronize_rcu();
15719 }
15720
15721 static inline void
15722 __lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
15723 {
15724 if (mode == eq->mode)
15725 return;
15726 	/*
15727 	 * Currently this function is only called during a hotplug
15728 	 * event and the cpu on which it is executing is going
15729 	 * offline. By now the hotplug code has instructed the
15730 	 * scheduler to remove this cpu from the cpu active mask,
15731 	 * so we don't need to worry about being set aside by the
15732 	 * scheduler for a high-priority process. Interrupts could
15733 	 * still arrive, but they are known to retire ASAP.
15734 	 */
15735
15736 /* Disable polling in the fastpath */
15737 WRITE_ONCE(eq->mode, mode);
15738 /* flush out the store buffer */
15739 smp_wmb();
15740
15741 	/*
15742 	 * Add this eq to the polling list and start polling. For
15743 	 * a grace period both the interrupt handler and the poller
15744 	 * will try to process the eq, but that's fine: the
15745 	 * queue_claimed synchronization mechanism deals with it.
15746 	 * This is just a draining phase for the interrupt handler
15747 	 * (not the eq's), as the barrier guarantees that all CPUs
15748 	 * have seen the new CQ_POLLED state, which effectively
15749 	 * disables re-arming of the EQ. The whole idea is that
15750 	 * eq's die off eventually as we are no longer re-arming
15751 	 * them.
15752 	 */
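	/* The ternary below relies on LPFC_EQ_POLL being nonzero and
	 * LPFC_EQ_INTERRUPT being zero: a nonzero mode adds the eq to the
	 * poll list, zero removes it.
	 */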
15753 mode ? lpfc_sli4_add_to_poll_list(eq) :
15754 lpfc_sli4_remove_from_poll_list(eq);
15755 }
15756
15757 void lpfc_sli4_start_polling(struct lpfc_queue *eq)
15758 {
15759 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
15760 }
15761
15762 void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
15763 {
15764 struct lpfc_hba *phba = eq->phba;
15765
15766 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
15767
15768 	/* Kick-start the pending io's in h/w.
15769 	 * Once we switch back to interrupt processing on an eq,
15770 	 * the io completion path only arms the eq when it
15771 	 * receives a completion; but a disarmed eq never
15772 	 * receives one, which would create a deadlock
15773 	 * scenario. Re-arm the eq here to break the cycle.
15774 	 */
15775 phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
15776 }
15777
15778 /**
15779 * lpfc_sli4_queue_free - free a queue structure and associated memory
15780 * @queue: The queue structure to free.
15781 *
15782 * This function frees a queue structure and the DMAable memory used for
15783 * the host resident queue. This function must be called after destroying the
15784 * queue on the HBA.
15785 **/
15786 void
15787 lpfc_sli4_queue_free(struct lpfc_queue *queue)
15788 {
15789 struct lpfc_dmabuf *dmabuf;
15790
15791 if (!queue)
15792 return;
15793
15794 if (!list_empty(&queue->wq_list))
15795 list_del(&queue->wq_list);
15796
15797 while (!list_empty(&queue->page_list)) {
15798 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
15799 list);
15800 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
15801 dmabuf->virt, dmabuf->phys);
15802 kfree(dmabuf);
15803 }
15804 if (queue->rqbp) {
15805 lpfc_free_rq_buffer(queue->phba, queue);
15806 kfree(queue->rqbp);
15807 }
15808
15809 if (!list_empty(&queue->cpu_list))
15810 list_del(&queue->cpu_list);
15811
15812 kfree(queue);
15813 return;
15814 }
15815
15816 /**
15817 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
15818 * @phba: The HBA that this queue is being created on.
15819 * @page_size: The size of a queue page
15820 * @entry_size: The size of each queue entry for this queue.
15821 * @entry_count: The number of entries that this queue will handle.
15822 * @cpu: The cpu that will primarily utilize this queue.
15823 *
15824 * This function allocates a queue structure and the DMAable memory used for
15825 * the host resident queue. This function must be called before creating the
15826 * queue on the HBA.
15827 **/
15828 struct lpfc_queue *
15829 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
15830 uint32_t entry_size, uint32_t entry_count, int cpu)
15831 {
15832 struct lpfc_queue *queue;
15833 struct lpfc_dmabuf *dmabuf;
15834 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15835 uint16_t x, pgcnt;
15836
15837 if (!phba->sli4_hba.pc_sli4_params.supported)
15838 hw_page_size = page_size;
15839
15840 pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
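	/* For example, 1024 entries of 64 bytes each on a 4 KiB hw page
	 * size need ALIGN(64 * 1024, 4096) / 4096 = 16 pages.
	 */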
15841
15842 	/* If needed, adjust the page count to match the max the adapter supports */
15843 if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
15844 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
15845
15846 queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
15847 GFP_KERNEL, cpu_to_node(cpu));
15848 if (!queue)
15849 return NULL;
15850
15851 INIT_LIST_HEAD(&queue->list);
15852 INIT_LIST_HEAD(&queue->_poll_list);
15853 INIT_LIST_HEAD(&queue->wq_list);
15854 INIT_LIST_HEAD(&queue->wqfull_list);
15855 INIT_LIST_HEAD(&queue->page_list);
15856 INIT_LIST_HEAD(&queue->child_list);
15857 INIT_LIST_HEAD(&queue->cpu_list);
15858
15859 /* Set queue parameters now. If the system cannot provide memory
15860 * resources, the free routine needs to know what was allocated.
15861 */
15862 queue->page_count = pgcnt;
15863 queue->q_pgs = (void **)&queue[1];
15864 queue->entry_cnt_per_pg = hw_page_size / entry_size;
15865 queue->entry_size = entry_size;
15866 queue->entry_count = entry_count;
15867 queue->page_size = hw_page_size;
15868 queue->phba = phba;
15869
15870 for (x = 0; x < queue->page_count; x++) {
15871 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
15872 dev_to_node(&phba->pcidev->dev));
15873 if (!dmabuf)
15874 goto out_fail;
15875 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
15876 hw_page_size, &dmabuf->phys,
15877 GFP_KERNEL);
15878 if (!dmabuf->virt) {
15879 kfree(dmabuf);
15880 goto out_fail;
15881 }
15882 dmabuf->buffer_tag = x;
15883 list_add_tail(&dmabuf->list, &queue->page_list);
15884 		/* use lpfc_sli4_qe to index a particular entry in this page */
15885 queue->q_pgs[x] = dmabuf->virt;
15886 }
15887 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
15888 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
15889 INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
15890 INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
15891
15892 /* notify_interval will be set during q creation */
15893
15894 return queue;
15895 out_fail:
15896 lpfc_sli4_queue_free(queue);
15897 return NULL;
15898 }
15899
15900 /**
15901 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
15902 * @phba: HBA structure that indicates port to create a queue on.
15903 * @pci_barset: PCI BAR set flag.
15904 *
15905  * This function returns the host memory address to which the specified PCI
15906  * BAR set was previously iomapped by the driver. The returned host memory
15907  * address can be NULL.
15908 */
15909 static void __iomem *
15910 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
15911 {
15912 if (!phba->pcidev)
15913 return NULL;
15914
15915 switch (pci_barset) {
15916 case WQ_PCI_BAR_0_AND_1:
15917 return phba->pci_bar0_memmap_p;
15918 case WQ_PCI_BAR_2_AND_3:
15919 return phba->pci_bar2_memmap_p;
15920 case WQ_PCI_BAR_4_AND_5:
15921 return phba->pci_bar4_memmap_p;
15922 default:
15923 break;
15924 }
15925 return NULL;
15926 }
15927
15928 /**
15929 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
15930 * @phba: HBA structure that EQs are on.
15931 * @startq: The starting EQ index to modify
15932 * @numq: The number of EQs (consecutive indexes) to modify
15933 * @usdelay: amount of delay
15934 *
15935 * This function revises the EQ delay on 1 or more EQs. The EQ delay
15936 * is set either by writing to a register (if supported by the SLI Port)
15937 * or by mailbox command. The mailbox command allows several EQs to be
15938 * updated at once.
15939 *
15940 * The @phba struct is used to send a mailbox command to HBA. The @startq
15941 * is used to get the starting EQ index to change. The @numq value is
15942 * used to specify how many consecutive EQ indexes, starting at EQ index,
15943  * are to be changed. Any mailbox command is issued in polled mode, so this
15944  * function waits for it to complete before returning.
15945  *
15946  * This function does not return a value. If the mailbox cannot be
15947  * allocated or the command fails, the error is reported through the
15948  * driver log. Note: on a mailbox failure, some EQs may already have had
15949  * their delay multiplier changed.
15950 **/
15951 void
15952 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
15953 uint32_t numq, uint32_t usdelay)
15954 {
15955 struct lpfc_mbx_modify_eq_delay *eq_delay;
15956 LPFC_MBOXQ_t *mbox;
15957 struct lpfc_queue *eq;
15958 int cnt = 0, rc, length;
15959 uint32_t shdr_status, shdr_add_status;
15960 uint32_t dmult;
15961 int qidx;
15962 union lpfc_sli4_cfg_shdr *shdr;
15963
15964 if (startq >= phba->cfg_irq_chann)
15965 return;
15966
15967 if (usdelay > 0xFFFF) {
15968 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
15969 "6429 usdelay %d too large. Scaled down to "
15970 "0xFFFF.\n", usdelay);
15971 usdelay = 0xFFFF;
15972 }
15973
15974 /* set values by EQ_DELAY register if supported */
15975 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
15976 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15977 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15978 if (!eq)
15979 continue;
15980
15981 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
15982
15983 if (++cnt >= numq)
15984 break;
15985 }
15986 return;
15987 }
15988
15989 /* Otherwise, set values by mailbox cmd */
15990
15991 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15992 if (!mbox) {
15993 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15994 "6428 Failed allocating mailbox cmd buffer."
15995 " EQ delay was not set.\n");
15996 return;
15997 }
15998 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
15999 sizeof(struct lpfc_sli4_cfg_mhdr));
16000 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16001 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
16002 length, LPFC_SLI4_MBX_EMBED);
16003 eq_delay = &mbox->u.mqe.un.eq_delay;
16004
16005 	/* Calculate the delay multiplier from the requested usec delay */
16006 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
16007 if (dmult)
16008 dmult--;
16009 if (dmult > LPFC_DMULT_MAX)
16010 dmult = LPFC_DMULT_MAX;
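	/* Worked example, assuming LPFC_DMULT_CONST is 651042 as defined in
	 * lpfc_hw4.h: usdelay = 16 gives dmult = (16 * 651042) / 1000000 = 10,
	 * decremented to 9, well under LPFC_DMULT_MAX.
	 */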
16011
16012 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
16013 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
16014 if (!eq)
16015 continue;
16016 eq->q_mode = usdelay;
16017 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
16018 eq_delay->u.request.eq[cnt].phase = 0;
16019 eq_delay->u.request.eq[cnt].delay_multi = dmult;
16020
16021 if (++cnt >= numq)
16022 break;
16023 }
16024 eq_delay->u.request.num_eq = cnt;
16025
16026 mbox->vport = phba->pport;
16027 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16028 mbox->ctx_ndlp = NULL;
16029 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16030 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
16031 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16032 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16033 if (shdr_status || shdr_add_status || rc) {
16034 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16035 "2512 MODIFY_EQ_DELAY mailbox failed with "
16036 "status x%x add_status x%x, mbx status x%x\n",
16037 shdr_status, shdr_add_status, rc);
16038 }
16039 mempool_free(mbox, phba->mbox_mem_pool);
16040 return;
16041 }
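
/*
 * A minimal usage sketch with hypothetical values: set an 8 usec
 * coalescing delay on the first four EQs. The routine writes the
 * EQ_DELAY register when supported, else falls back to the
 * MODIFY_EQ_DELAY mailbox command:
 *
 *	lpfc_modify_hba_eq_delay(phba, 0, 4, 8);
 */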
16042
16043 /**
16044 * lpfc_eq_create - Create an Event Queue on the HBA
16045 * @phba: HBA structure that indicates port to create a queue on.
16046 * @eq: The queue structure to use to create the event queue.
16047 * @imax: The maximum interrupt per second limit.
16048 *
16049 * This function creates an event queue, as detailed in @eq, on a port,
16050 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
16051 *
16052 * The @phba struct is used to send mailbox command to HBA. The @eq struct
16053 * is used to get the entry count and entry size that are necessary to
16054 * determine the number of pages to allocate and use for this queue. This
16055 * function will send the EQ_CREATE mailbox command to the HBA to setup the
16056  * event queue. The mailbox command is issued in polled mode, so this
16057  * function waits for it to complete before continuing.
16058 *
16059 * On success this function will return a zero. If unable to allocate enough
16060 * memory this function will return -ENOMEM. If the queue create mailbox command
16061 * fails this function will return -ENXIO.
16062 **/
16063 int
16064 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
16065 {
16066 struct lpfc_mbx_eq_create *eq_create;
16067 LPFC_MBOXQ_t *mbox;
16068 int rc, length, status = 0;
16069 struct lpfc_dmabuf *dmabuf;
16070 uint32_t shdr_status, shdr_add_status;
16071 union lpfc_sli4_cfg_shdr *shdr;
16072 uint16_t dmult;
16073 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16074
16075 /* sanity check on queue memory */
16076 if (!eq)
16077 return -ENODEV;
16078 if (!phba->sli4_hba.pc_sli4_params.supported)
16079 hw_page_size = SLI4_PAGE_SIZE;
16080
16081 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16082 if (!mbox)
16083 return -ENOMEM;
16084 length = (sizeof(struct lpfc_mbx_eq_create) -
16085 sizeof(struct lpfc_sli4_cfg_mhdr));
16086 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16087 LPFC_MBOX_OPCODE_EQ_CREATE,
16088 length, LPFC_SLI4_MBX_EMBED);
16089 eq_create = &mbox->u.mqe.un.eq_create;
16090 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
16091 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
16092 eq->page_count);
16093 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
16094 LPFC_EQE_SIZE);
16095 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
16096
16097 /* Use version 2 of CREATE_EQ if eqav is set */
16098 if (phba->sli4_hba.pc_sli4_params.eqav) {
16099 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16100 LPFC_Q_CREATE_VERSION_2);
16101 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
16102 phba->sli4_hba.pc_sli4_params.eqav);
16103 }
16104
16105 /* don't setup delay multiplier using EQ_CREATE */
16106 dmult = 0;
16107 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
16108 dmult);
16109 switch (eq->entry_count) {
16110 default:
16111 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16112 "0360 Unsupported EQ count. (%d)\n",
16113 eq->entry_count);
16114 if (eq->entry_count < 256) {
16115 status = -EINVAL;
16116 goto out;
16117 }
16118 fallthrough; /* otherwise default to smallest count */
16119 case 256:
16120 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16121 LPFC_EQ_CNT_256);
16122 break;
16123 case 512:
16124 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16125 LPFC_EQ_CNT_512);
16126 break;
16127 case 1024:
16128 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16129 LPFC_EQ_CNT_1024);
16130 break;
16131 case 2048:
16132 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16133 LPFC_EQ_CNT_2048);
16134 break;
16135 case 4096:
16136 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16137 LPFC_EQ_CNT_4096);
16138 break;
16139 }
16140 list_for_each_entry(dmabuf, &eq->page_list, list) {
16141 memset(dmabuf->virt, 0, hw_page_size);
16142 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16143 putPaddrLow(dmabuf->phys);
16144 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16145 putPaddrHigh(dmabuf->phys);
16146 }
16147 mbox->vport = phba->pport;
16148 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16149 mbox->ctx_buf = NULL;
16150 mbox->ctx_ndlp = NULL;
16151 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16152 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16153 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16154 if (shdr_status || shdr_add_status || rc) {
16155 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16156 "2500 EQ_CREATE mailbox failed with "
16157 "status x%x add_status x%x, mbx status x%x\n",
16158 shdr_status, shdr_add_status, rc);
16159 status = -ENXIO;
16160 }
16161 eq->type = LPFC_EQ;
16162 eq->subtype = LPFC_NONE;
16163 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
16164 if (eq->queue_id == 0xFFFF)
16165 status = -ENXIO;
16166 eq->host_index = 0;
16167 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
16168 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
16169 out:
16170 mempool_free(mbox, phba->mbox_mem_pool);
16171 return status;
16172 }
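
/*
 * A minimal creation sketch with hypothetical sizes: allocate the
 * host-resident queue memory first, then create the EQ on the port.
 * The entry count of 1024 and the imax value are illustrative, not
 * the driver's configured defaults:
 *
 *	eq = lpfc_sli4_queue_alloc(phba, SLI4_PAGE_SIZE,
 *				   sizeof(struct lpfc_eqe), 1024, cpu);
 *	if (eq && lpfc_eq_create(phba, eq, imax))
 *		lpfc_sli4_queue_free(eq);
 */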
16173
16174 /**
16175 * lpfc_sli4_hba_intr_handler_th - SLI4 HBA threaded interrupt handler
16176 * @irq: Interrupt number.
16177 * @dev_id: The device context pointer.
16178 *
16179 * This routine is a mirror of lpfc_sli4_hba_intr_handler, but executed within
16180 * threaded irq context.
16181 *
16182 * Returns
16183 * IRQ_HANDLED - interrupt is handled
16184 * IRQ_NONE - otherwise
16185 **/
16186 irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id)
16187 {
16188 struct lpfc_hba *phba;
16189 struct lpfc_hba_eq_hdl *hba_eq_hdl;
16190 struct lpfc_queue *fpeq;
16191 int ecount = 0;
16192 int hba_eqidx;
16193 struct lpfc_eq_intr_info *eqi;
16194
16195 /* Get the driver's phba structure from the dev_id */
16196 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
16197 phba = hba_eq_hdl->phba;
16198 hba_eqidx = hba_eq_hdl->idx;
16199
16200 if (unlikely(!phba))
16201 return IRQ_NONE;
16202 if (unlikely(!phba->sli4_hba.hdwq))
16203 return IRQ_NONE;
16204
16205 /* Get to the EQ struct associated with this vector */
16206 fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
16207 if (unlikely(!fpeq))
16208 return IRQ_NONE;
16209
16210 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, raw_smp_processor_id());
16211 eqi->icnt++;
16212
16213 fpeq->last_cpu = raw_smp_processor_id();
16214
16215 if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
16216 fpeq->q_flag & HBA_EQ_DELAY_CHK &&
16217 phba->cfg_auto_imax &&
16218 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
16219 phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
16220 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
16221
16222 /* process and rearm the EQ */
16223 ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
16224 LPFC_THREADED_IRQ);
16225
16226 if (unlikely(ecount == 0)) {
16227 fpeq->EQ_no_entry++;
16228 if (phba->intr_type == MSIX)
16229 			/* Dedicated MSI-X vector, so an empty EQ is unexpected; log it */
16230 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
16231 "3358 MSI-X interrupt with no EQE\n");
16232 else
16233 			/* INTx/MSI may be shared with another device; not our interrupt */
16234 return IRQ_NONE;
16235 }
16236 return IRQ_HANDLED;
16237 }
16238
16239 /**
16240 * lpfc_cq_create - Create a Completion Queue on the HBA
16241 * @phba: HBA structure that indicates port to create a queue on.
16242 * @cq: The queue structure to use to create the completion queue.
16243 * @eq: The event queue to bind this completion queue to.
16244 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
16245 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
16246 *
16247  * This function creates a completion queue, as detailed in @cq, on a port,
16248 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
16249 *
16250 * The @phba struct is used to send mailbox command to HBA. The @cq struct
16251 * is used to get the entry count and entry size that are necessary to
16252 * determine the number of pages to allocate and use for this queue. The @eq
16253 * is used to indicate which event queue to bind this completion queue to. This
16254 * function will send the CQ_CREATE mailbox command to the HBA to setup the
16255  * completion queue. The mailbox command is issued in polled mode, so this
16256  * function waits for it to complete before continuing.
16257 *
16258 * On success this function will return a zero. If unable to allocate enough
16259 * memory this function will return -ENOMEM. If the queue create mailbox command
16260 * fails this function will return -ENXIO.
16261 **/
16262 int
16263 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
16264 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
16265 {
16266 struct lpfc_mbx_cq_create *cq_create;
16267 struct lpfc_dmabuf *dmabuf;
16268 LPFC_MBOXQ_t *mbox;
16269 int rc, length, status = 0;
16270 uint32_t shdr_status, shdr_add_status;
16271 union lpfc_sli4_cfg_shdr *shdr;
16272
16273 /* sanity check on queue memory */
16274 if (!cq || !eq)
16275 return -ENODEV;
16276
16277 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16278 if (!mbox)
16279 return -ENOMEM;
16280 length = (sizeof(struct lpfc_mbx_cq_create) -
16281 sizeof(struct lpfc_sli4_cfg_mhdr));
16282 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16283 LPFC_MBOX_OPCODE_CQ_CREATE,
16284 length, LPFC_SLI4_MBX_EMBED);
16285 cq_create = &mbox->u.mqe.un.cq_create;
16286 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
16287 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
16288 cq->page_count);
16289 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
16290 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
16291 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16292 phba->sli4_hba.pc_sli4_params.cqv);
16293 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
16294 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
16295 (cq->page_size / SLI4_PAGE_SIZE));
16296 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
16297 eq->queue_id);
16298 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
16299 phba->sli4_hba.pc_sli4_params.cqav);
16300 } else {
16301 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
16302 eq->queue_id);
16303 }
16304 switch (cq->entry_count) {
16305 case 2048:
16306 case 4096:
16307 if (phba->sli4_hba.pc_sli4_params.cqv ==
16308 LPFC_Q_CREATE_VERSION_2) {
16309 cq_create->u.request.context.lpfc_cq_context_count =
16310 cq->entry_count;
16311 bf_set(lpfc_cq_context_count,
16312 &cq_create->u.request.context,
16313 LPFC_CQ_CNT_WORD7);
16314 break;
16315 }
16316 fallthrough;
16317 default:
16318 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16319 "0361 Unsupported CQ count: "
16320 "entry cnt %d sz %d pg cnt %d\n",
16321 cq->entry_count, cq->entry_size,
16322 cq->page_count);
16323 if (cq->entry_count < 256) {
16324 status = -EINVAL;
16325 goto out;
16326 }
16327 fallthrough; /* otherwise default to smallest count */
16328 case 256:
16329 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16330 LPFC_CQ_CNT_256);
16331 break;
16332 case 512:
16333 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16334 LPFC_CQ_CNT_512);
16335 break;
16336 case 1024:
16337 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16338 LPFC_CQ_CNT_1024);
16339 break;
16340 }
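	/*
	 * Summary of the encoding above: counts of 256/512/1024 map to the
	 * discrete LPFC_CQ_CNT_* encodings, while 2048 and 4096 are accepted
	 * only on queue-create version 2 ports, where the raw count travels
	 * in word 7 of the CQ context. Any other count falls back to 256,
	 * or fails with -EINVAL if smaller than 256.
	 */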
16341 list_for_each_entry(dmabuf, &cq->page_list, list) {
16342 memset(dmabuf->virt, 0, cq->page_size);
16343 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16344 putPaddrLow(dmabuf->phys);
16345 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16346 putPaddrHigh(dmabuf->phys);
16347 }
16348 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16349
16350 /* The IOCTL status is embedded in the mailbox subheader. */
16351 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16352 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16353 if (shdr_status || shdr_add_status || rc) {
16354 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16355 "2501 CQ_CREATE mailbox failed with "
16356 "status x%x add_status x%x, mbx status x%x\n",
16357 shdr_status, shdr_add_status, rc);
16358 status = -ENXIO;
16359 goto out;
16360 }
16361 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
16362 if (cq->queue_id == 0xFFFF) {
16363 status = -ENXIO;
16364 goto out;
16365 }
16366 /* link the cq onto the parent eq child list */
16367 list_add_tail(&cq->list, &eq->child_list);
16368 /* Set up completion queue's type and subtype */
16369 cq->type = type;
16370 cq->subtype = subtype;
16372 cq->assoc_qid = eq->queue_id;
16373 cq->assoc_qp = eq;
16374 cq->host_index = 0;
16375 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16376 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
16377
16378 if (cq->queue_id > phba->sli4_hba.cq_max)
16379 phba->sli4_hba.cq_max = cq->queue_id;
16380 out:
16381 mempool_free(mbox, phba->mbox_mem_pool);
16382 return status;
16383 }
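/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * SLI4 setup code typically pairs each previously created EQ with a CQ and
 * propagates the return code. The hdwq/io_cq/hba_eq names below follow the
 * driver's hardware-queue layout but are shown only as an example.
 *
 *	rc = lpfc_cq_create(phba, phba->sli4_hba.hdwq[idx].io_cq,
 *			    phba->sli4_hba.hdwq[idx].hba_eq,
 *			    LPFC_WCQ, LPFC_IO);
 *	if (rc)
 *		return rc;	(-ENODEV, -ENOMEM or -ENXIO per the above)
 */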
16384
16385 /**
16386 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
16387 * @phba: HBA structure that indicates port to create a queue on.
16388 * @cqp: The queue structure array to use to create the completion queues.
16389 * @hdwq: The hardware queue array with the EQ to bind completion queues to.
16390 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
16391 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
16392 *
16393 * This function creates a set of completion queues to support MRQ,
16394 * as detailed in @cqp, on a port
16395 * described by @phba, by sending a CREATE_CQ_SET mailbox command to the HBA.
16396 *
16397 * The @phba struct is used to send the mailbox command to the HBA. The @cqp
16398 * array is used to get the entry count and entry size that are necessary to
16399 * determine the number of pages to allocate and use for each queue. The EQs
16400 * in @hdwq indicate which event queue to bind each completion queue to. This
16401 * function will send the CREATE_CQ_SET mailbox command to the HBA to set up
16402 * the completion queues. This function is synchronous and will wait for the
16403 * mailbox command to finish before returning.
16404 *
16405 * On success this function returns zero. If unable to allocate enough
16406 * memory it returns -ENOMEM. If the queue create mailbox command
16407 * fails it returns -ENXIO.
16408 **/
16409 int
16410 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
16411 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
16412 uint32_t subtype)
16413 {
16414 struct lpfc_queue *cq;
16415 struct lpfc_queue *eq;
16416 struct lpfc_mbx_cq_create_set *cq_set;
16417 struct lpfc_dmabuf *dmabuf;
16418 LPFC_MBOXQ_t *mbox;
16419 int rc, length, alloclen, status = 0;
16420 int cnt, idx, numcq, page_idx = 0;
16421 uint32_t shdr_status, shdr_add_status;
16422 union lpfc_sli4_cfg_shdr *shdr;
16423 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16424
16425 /* sanity check on queue memory */
16426 numcq = phba->cfg_nvmet_mrq;
16427 if (!cqp || !hdwq || !numcq)
16428 return -ENODEV;
16429
16430 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16431 if (!mbox)
16432 return -ENOMEM;
16433
16434 length = sizeof(struct lpfc_mbx_cq_create_set);
16435 length += ((numcq * cqp[0]->page_count) *
16436 sizeof(struct dma_address));
16437 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16438 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
16439 LPFC_SLI4_MBX_NEMBED);
16440 if (alloclen < length) {
16441 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16442 "3098 Allocated DMA memory size (%d) is "
16443 "less than the requested DMA memory size "
16444 "(%d)\n", alloclen, length);
16445 status = -ENOMEM;
16446 goto out;
16447 }
16448 cq_set = mbox->sge_array->addr[0];
16449 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
16450 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
16451
16452 for (idx = 0; idx < numcq; idx++) {
16453 cq = cqp[idx];
16454 eq = hdwq[idx].hba_eq;
16455 if (!cq || !eq) {
16456 status = -ENOMEM;
16457 goto out;
16458 }
16459 if (!phba->sli4_hba.pc_sli4_params.supported)
16460 hw_page_size = cq->page_size;
16461
16462 switch (idx) {
16463 case 0:
16464 bf_set(lpfc_mbx_cq_create_set_page_size,
16465 &cq_set->u.request,
16466 (hw_page_size / SLI4_PAGE_SIZE));
16467 bf_set(lpfc_mbx_cq_create_set_num_pages,
16468 &cq_set->u.request, cq->page_count);
16469 bf_set(lpfc_mbx_cq_create_set_evt,
16470 &cq_set->u.request, 1);
16471 bf_set(lpfc_mbx_cq_create_set_valid,
16472 &cq_set->u.request, 1);
16473 bf_set(lpfc_mbx_cq_create_set_cqe_size,
16474 &cq_set->u.request, 0);
16475 bf_set(lpfc_mbx_cq_create_set_num_cq,
16476 &cq_set->u.request, numcq);
16477 bf_set(lpfc_mbx_cq_create_set_autovalid,
16478 &cq_set->u.request,
16479 phba->sli4_hba.pc_sli4_params.cqav);
16480 switch (cq->entry_count) {
16481 case 2048:
16482 case 4096:
16483 if (phba->sli4_hba.pc_sli4_params.cqv ==
16484 LPFC_Q_CREATE_VERSION_2) {
16485 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16486 &cq_set->u.request,
16487 cq->entry_count);
16488 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16489 &cq_set->u.request,
16490 LPFC_CQ_CNT_WORD7);
16491 break;
16492 }
16493 fallthrough;
16494 default:
16495 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16496 "3118 Bad CQ count. (%d)\n",
16497 cq->entry_count);
16498 if (cq->entry_count < 256) {
16499 status = -EINVAL;
16500 goto out;
16501 }
16502 fallthrough; /* otherwise default to smallest */
16503 case 256:
16504 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16505 &cq_set->u.request, LPFC_CQ_CNT_256);
16506 break;
16507 case 512:
16508 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16509 &cq_set->u.request, LPFC_CQ_CNT_512);
16510 break;
16511 case 1024:
16512 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16513 &cq_set->u.request, LPFC_CQ_CNT_1024);
16514 break;
16515 }
16516 bf_set(lpfc_mbx_cq_create_set_eq_id0,
16517 &cq_set->u.request, eq->queue_id);
16518 break;
16519 case 1:
16520 bf_set(lpfc_mbx_cq_create_set_eq_id1,
16521 &cq_set->u.request, eq->queue_id);
16522 break;
16523 case 2:
16524 bf_set(lpfc_mbx_cq_create_set_eq_id2,
16525 &cq_set->u.request, eq->queue_id);
16526 break;
16527 case 3:
16528 bf_set(lpfc_mbx_cq_create_set_eq_id3,
16529 &cq_set->u.request, eq->queue_id);
16530 break;
16531 case 4:
16532 bf_set(lpfc_mbx_cq_create_set_eq_id4,
16533 &cq_set->u.request, eq->queue_id);
16534 break;
16535 case 5:
16536 bf_set(lpfc_mbx_cq_create_set_eq_id5,
16537 &cq_set->u.request, eq->queue_id);
16538 break;
16539 case 6:
16540 bf_set(lpfc_mbx_cq_create_set_eq_id6,
16541 &cq_set->u.request, eq->queue_id);
16542 break;
16543 case 7:
16544 bf_set(lpfc_mbx_cq_create_set_eq_id7,
16545 &cq_set->u.request, eq->queue_id);
16546 break;
16547 case 8:
16548 bf_set(lpfc_mbx_cq_create_set_eq_id8,
16549 &cq_set->u.request, eq->queue_id);
16550 break;
16551 case 9:
16552 bf_set(lpfc_mbx_cq_create_set_eq_id9,
16553 &cq_set->u.request, eq->queue_id);
16554 break;
16555 case 10:
16556 bf_set(lpfc_mbx_cq_create_set_eq_id10,
16557 &cq_set->u.request, eq->queue_id);
16558 break;
16559 case 11:
16560 bf_set(lpfc_mbx_cq_create_set_eq_id11,
16561 &cq_set->u.request, eq->queue_id);
16562 break;
16563 case 12:
16564 bf_set(lpfc_mbx_cq_create_set_eq_id12,
16565 &cq_set->u.request, eq->queue_id);
16566 break;
16567 case 13:
16568 bf_set(lpfc_mbx_cq_create_set_eq_id13,
16569 &cq_set->u.request, eq->queue_id);
16570 break;
16571 case 14:
16572 bf_set(lpfc_mbx_cq_create_set_eq_id14,
16573 &cq_set->u.request, eq->queue_id);
16574 break;
16575 case 15:
16576 bf_set(lpfc_mbx_cq_create_set_eq_id15,
16577 &cq_set->u.request, eq->queue_id);
16578 break;
16579 }
16580
16581 /* link the cq onto the parent eq child list */
16582 list_add_tail(&cq->list, &eq->child_list);
16583 /* Set up completion queue's type and subtype */
16584 cq->type = type;
16585 cq->subtype = subtype;
16586 cq->assoc_qid = eq->queue_id;
16587 cq->assoc_qp = eq;
16588 cq->host_index = 0;
16589 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16590 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
16591 cq->entry_count);
16592 cq->chann = idx;
16593
16594 rc = 0;
16595 list_for_each_entry(dmabuf, &cq->page_list, list) {
16596 memset(dmabuf->virt, 0, hw_page_size);
16597 cnt = page_idx + dmabuf->buffer_tag;
16598 cq_set->u.request.page[cnt].addr_lo =
16599 putPaddrLow(dmabuf->phys);
16600 cq_set->u.request.page[cnt].addr_hi =
16601 putPaddrHigh(dmabuf->phys);
16602 rc++;
16603 }
16604 page_idx += rc;
16605 }
16606
16607 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16608
16609 /* The IOCTL status is embedded in the mailbox subheader. */
16610 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16611 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16612 if (shdr_status || shdr_add_status || rc) {
16613 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16614 "3119 CQ_CREATE_SET mailbox failed with "
16615 "status x%x add_status x%x, mbx status x%x\n",
16616 shdr_status, shdr_add_status, rc);
16617 status = -ENXIO;
16618 goto out;
16619 }
16620 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
16621 if (rc == 0xFFFF) {
16622 status = -ENXIO;
16623 goto out;
16624 }
16625
16626 for (idx = 0; idx < numcq; idx++) {
16627 cq = cqp[idx];
16628 cq->queue_id = rc + idx;
16629 if (cq->queue_id > phba->sli4_hba.cq_max)
16630 phba->sli4_hba.cq_max = cq->queue_id;
16631 }
16632
16633 out:
16634 lpfc_sli4_mbox_cmd_free(phba, mbox);
16635 return status;
16636 }
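/*
 * Illustrative usage sketch (hypothetical caller): NVMET MRQ setup would
 * create all cfg_nvmet_mrq completion queues in one mailbox round trip,
 * e.g.
 *
 *	rc = lpfc_cq_create_set(phba, phba->sli4_hba.nvmet_cqset,
 *				phba->sli4_hba.hdwq, LPFC_WCQ, LPFC_NVMET);
 *
 * The response carries a single base queue id; each member CQ is then
 * numbered base + idx, so a set of two with base id 100 gets ids 100
 * and 101.
 */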
16637
16638 /**
16639 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
16640 * @phba: HBA structure that indicates port to create a queue on.
16641 * @mq: The queue structure to use to create the mailbox queue.
16642 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
16643 * @cq: The completion queue to associate with this mailbox queue.
16644 *
16645 * This function provides fallback (fb) functionality when the
16646 * mq_create_ext fails on older FW generations. Its purpose is identical
16647 * to mq_create_ext otherwise.
16648 *
16649 * This routine cannot fail as all attributes were previously accessed and
16650 * initialized in mq_create_ext.
16651 **/
16652 static void
16653 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
16654 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
16655 {
16656 struct lpfc_mbx_mq_create *mq_create;
16657 struct lpfc_dmabuf *dmabuf;
16658 int length;
16659
16660 length = (sizeof(struct lpfc_mbx_mq_create) -
16661 sizeof(struct lpfc_sli4_cfg_mhdr));
16662 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16663 LPFC_MBOX_OPCODE_MQ_CREATE,
16664 length, LPFC_SLI4_MBX_EMBED);
16665 mq_create = &mbox->u.mqe.un.mq_create;
16666 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
16667 mq->page_count);
16668 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
16669 cq->queue_id);
16670 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
16671 switch (mq->entry_count) {
16672 case 16:
16673 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16674 LPFC_MQ_RING_SIZE_16);
16675 break;
16676 case 32:
16677 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16678 LPFC_MQ_RING_SIZE_32);
16679 break;
16680 case 64:
16681 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16682 LPFC_MQ_RING_SIZE_64);
16683 break;
16684 case 128:
16685 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16686 LPFC_MQ_RING_SIZE_128);
16687 break;
16688 }
16689 list_for_each_entry(dmabuf, &mq->page_list, list) {
16690 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16691 putPaddrLow(dmabuf->phys);
16692 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16693 putPaddrHigh(dmabuf->phys);
16694 }
16695 }
16696
16697 /**
16698 * lpfc_mq_create - Create a Mailbox Queue on the HBA
16699 * @phba: HBA structure that indicates port to create a queue on.
16700 * @mq: The queue structure to use to create the mailbox queue.
16701 * @cq: The completion queue to associate with this mailbox queue.
16702 * @subtype: The queue's subtype.
16703 *
16704 * This function creates a mailbox queue, as detailed in @mq, on a port,
16705 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
16706 *
16707 * The @phba struct is used to send the mailbox command to the HBA. The @mq
16708 * struct is used to get the entry count and entry size that are necessary to
16709 * determine the number of pages to allocate and use for this queue. This
16710 * function will send the MQ_CREATE mailbox command to the HBA to set up the
16711 * mailbox queue. This function is synchronous and will wait for the mailbox
16712 * command to finish before returning.
16713 *
16714 * On success this function returns zero. If unable to allocate enough
16715 * memory it returns -ENOMEM. If the queue create mailbox command
16716 * fails it returns -ENXIO.
16717 **/
16718 int32_t
16719 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
16720 struct lpfc_queue *cq, uint32_t subtype)
16721 {
16722 struct lpfc_mbx_mq_create *mq_create;
16723 struct lpfc_mbx_mq_create_ext *mq_create_ext;
16724 struct lpfc_dmabuf *dmabuf;
16725 LPFC_MBOXQ_t *mbox;
16726 int rc, length, status = 0;
16727 uint32_t shdr_status, shdr_add_status;
16728 union lpfc_sli4_cfg_shdr *shdr;
16729 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16730
16731 /* sanity check on queue memory */
16732 if (!mq || !cq)
16733 return -ENODEV;
16734 if (!phba->sli4_hba.pc_sli4_params.supported)
16735 hw_page_size = SLI4_PAGE_SIZE;
16736
16737 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16738 if (!mbox)
16739 return -ENOMEM;
16740 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
16741 sizeof(struct lpfc_sli4_cfg_mhdr));
16742 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16743 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
16744 length, LPFC_SLI4_MBX_EMBED);
16745
16746 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
16747 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
16748 bf_set(lpfc_mbx_mq_create_ext_num_pages,
16749 &mq_create_ext->u.request, mq->page_count);
16750 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
16751 &mq_create_ext->u.request, 1);
16752 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
16753 &mq_create_ext->u.request, 1);
16754 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
16755 &mq_create_ext->u.request, 1);
16756 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
16757 &mq_create_ext->u.request, 1);
16758 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
16759 &mq_create_ext->u.request, 1);
16760 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
16761 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16762 phba->sli4_hba.pc_sli4_params.mqv);
16763 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
16764 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
16765 cq->queue_id);
16766 else
16767 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
16768 cq->queue_id);
16769 switch (mq->entry_count) {
16770 default:
16771 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16772 "0362 Unsupported MQ count. (%d)\n",
16773 mq->entry_count);
16774 if (mq->entry_count < 16) {
16775 status = -EINVAL;
16776 goto out;
16777 }
16778 fallthrough; /* otherwise default to smallest count */
16779 case 16:
16780 bf_set(lpfc_mq_context_ring_size,
16781 &mq_create_ext->u.request.context,
16782 LPFC_MQ_RING_SIZE_16);
16783 break;
16784 case 32:
16785 bf_set(lpfc_mq_context_ring_size,
16786 &mq_create_ext->u.request.context,
16787 LPFC_MQ_RING_SIZE_32);
16788 break;
16789 case 64:
16790 bf_set(lpfc_mq_context_ring_size,
16791 &mq_create_ext->u.request.context,
16792 LPFC_MQ_RING_SIZE_64);
16793 break;
16794 case 128:
16795 bf_set(lpfc_mq_context_ring_size,
16796 &mq_create_ext->u.request.context,
16797 LPFC_MQ_RING_SIZE_128);
16798 break;
16799 }
16800 list_for_each_entry(dmabuf, &mq->page_list, list) {
16801 memset(dmabuf->virt, 0, hw_page_size);
16802 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
16803 putPaddrLow(dmabuf->phys);
16804 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
16805 putPaddrHigh(dmabuf->phys);
16806 }
16807 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16808 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16809 &mq_create_ext->u.response);
16810 if (rc != MBX_SUCCESS) {
16811 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16812 "2795 MQ_CREATE_EXT failed with "
16813 "status x%x. Failback to MQ_CREATE.\n",
16814 rc);
16815 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
16816 mq_create = &mbox->u.mqe.un.mq_create;
16817 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16818 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
16819 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16820 &mq_create->u.response);
16821 }
16822
16823 /* The IOCTL status is embedded in the mailbox subheader. */
16824 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16825 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16826 if (shdr_status || shdr_add_status || rc) {
16827 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16828 "2502 MQ_CREATE mailbox failed with "
16829 "status x%x add_status x%x, mbx status x%x\n",
16830 shdr_status, shdr_add_status, rc);
16831 status = -ENXIO;
16832 goto out;
16833 }
16834 if (mq->queue_id == 0xFFFF) {
16835 status = -ENXIO;
16836 goto out;
16837 }
16838 mq->type = LPFC_MQ;
16839 mq->assoc_qid = cq->queue_id;
16840 mq->subtype = subtype;
16841 mq->host_index = 0;
16842 mq->hba_index = 0;
16843
16844 /* link the mq onto the parent cq child list */
16845 list_add_tail(&mq->list, &cq->child_list);
16846 out:
16847 mempool_free(mbox, phba->mbox_mem_pool);
16848 return status;
16849 }
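/*
 * Illustrative usage sketch (hypothetical caller; 'mq' and 'mbx_cq' stand
 * for whatever MQ/CQ pair the caller allocated): MQ_CREATE_EXT is tried
 * first so the async event types (link, FIP, group5, FC, SLI) are
 * registered at create time, and only a failure of that mailbox triggers
 * the plain MQ_CREATE fallback built by lpfc_mq_create_fb_init().
 *
 *	rc = lpfc_mq_create(phba, mq, mbx_cq, LPFC_MBOX);
 *	if (rc)
 *		goto fail;
 */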
16850
16851 /**
16852 * lpfc_wq_create - Create a Work Queue on the HBA
16853 * @phba: HBA structure that indicates port to create a queue on.
16854 * @wq: The queue structure to use to create the work queue.
16855 * @cq: The completion queue to bind this work queue to.
16856 * @subtype: The subtype of the work queue indicating its functionality.
16857 *
16858 * This function creates a work queue, as detailed in @wq, on a port, described
16859 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
16860 *
16861 * The @phba struct is used to send the mailbox command to the HBA. The @wq
16862 * struct is used to get the entry count and entry size that are necessary to
16863 * determine the number of pages to allocate and use for this queue. The @cq
16864 * is used to indicate which completion queue to bind this work queue to. This
16865 * function will send the WQ_CREATE mailbox command to the HBA to set up the
16866 * work queue. This function is synchronous and will wait for the mailbox
16867 * command to finish before returning.
16868 *
16869 * On success this function returns zero. If unable to allocate enough
16870 * memory it returns -ENOMEM. If the queue create mailbox command
16871 * fails it returns -ENXIO.
16872 **/
16873 int
16874 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
16875 struct lpfc_queue *cq, uint32_t subtype)
16876 {
16877 struct lpfc_mbx_wq_create *wq_create;
16878 struct lpfc_dmabuf *dmabuf;
16879 LPFC_MBOXQ_t *mbox;
16880 int rc, length, status = 0;
16881 uint32_t shdr_status, shdr_add_status;
16882 union lpfc_sli4_cfg_shdr *shdr;
16883 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16884 struct dma_address *page;
16885 void __iomem *bar_memmap_p;
16886 uint32_t db_offset;
16887 uint16_t pci_barset;
16888 uint8_t dpp_barset;
16889 uint32_t dpp_offset;
16890 uint8_t wq_create_version;
16891 #ifdef CONFIG_X86
16892 unsigned long pg_addr;
16893 #endif
16894
16895 /* sanity check on queue memory */
16896 if (!wq || !cq)
16897 return -ENODEV;
16898 if (!phba->sli4_hba.pc_sli4_params.supported)
16899 hw_page_size = wq->page_size;
16900
16901 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16902 if (!mbox)
16903 return -ENOMEM;
16904 length = (sizeof(struct lpfc_mbx_wq_create) -
16905 sizeof(struct lpfc_sli4_cfg_mhdr));
16906 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16907 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
16908 length, LPFC_SLI4_MBX_EMBED);
16909 wq_create = &mbox->u.mqe.un.wq_create;
16910 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
16911 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
16912 wq->page_count);
16913 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
16914 cq->queue_id);
16915
16916 /* wqv is the earliest version supported, NOT the latest */
16917 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16918 phba->sli4_hba.pc_sli4_params.wqv);
16919
16920 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
16921 (wq->page_size > SLI4_PAGE_SIZE))
16922 wq_create_version = LPFC_Q_CREATE_VERSION_1;
16923 else
16924 wq_create_version = LPFC_Q_CREATE_VERSION_0;
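	/*
	 * Version 1 of WQ_CREATE is required to express either 128-byte
	 * WQEs or a queue page size larger than 4KB; the version 0 request
	 * cannot carry those fields, so it is used only for the legacy
	 * 64-byte-WQE, 4KB-page layout.
	 */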
16925
16926 switch (wq_create_version) {
16927 case LPFC_Q_CREATE_VERSION_1:
16928 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
16929 wq->entry_count);
16930 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16931 LPFC_Q_CREATE_VERSION_1);
16932
16933 switch (wq->entry_size) {
16934 default:
16935 case 64:
16936 bf_set(lpfc_mbx_wq_create_wqe_size,
16937 &wq_create->u.request_1,
16938 LPFC_WQ_WQE_SIZE_64);
16939 break;
16940 case 128:
16941 bf_set(lpfc_mbx_wq_create_wqe_size,
16942 &wq_create->u.request_1,
16943 LPFC_WQ_WQE_SIZE_128);
16944 break;
16945 }
16946 /* Request DPP by default */
16947 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
16948 bf_set(lpfc_mbx_wq_create_page_size,
16949 &wq_create->u.request_1,
16950 (wq->page_size / SLI4_PAGE_SIZE));
16951 page = wq_create->u.request_1.page;
16952 break;
16953 default:
16954 page = wq_create->u.request.page;
16955 break;
16956 }
16957
16958 list_for_each_entry(dmabuf, &wq->page_list, list) {
16959 memset(dmabuf->virt, 0, hw_page_size);
16960 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
16961 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
16962 }
16963
16964 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16965 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
16966
16967 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16968 /* The IOCTL status is embedded in the mailbox subheader. */
16969 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16970 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16971 if (shdr_status || shdr_add_status || rc) {
16972 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16973 "2503 WQ_CREATE mailbox failed with "
16974 "status x%x add_status x%x, mbx status x%x\n",
16975 shdr_status, shdr_add_status, rc);
16976 status = -ENXIO;
16977 goto out;
16978 }
16979
16980 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
16981 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
16982 &wq_create->u.response);
16983 else
16984 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
16985 &wq_create->u.response_1);
16986
16987 if (wq->queue_id == 0xFFFF) {
16988 status = -ENXIO;
16989 goto out;
16990 }
16991
16992 wq->db_format = LPFC_DB_LIST_FORMAT;
16993 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
16994 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16995 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
16996 &wq_create->u.response);
16997 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
16998 (wq->db_format != LPFC_DB_RING_FORMAT)) {
16999 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17000 "3265 WQ[%d] doorbell format "
17001 "not supported: x%x\n",
17002 wq->queue_id, wq->db_format);
17003 status = -EINVAL;
17004 goto out;
17005 }
17006 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
17007 &wq_create->u.response);
17008 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
17009 pci_barset);
17010 if (!bar_memmap_p) {
17011 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17012 "3263 WQ[%d] failed to memmap "
17013 "pci barset:x%x\n",
17014 wq->queue_id, pci_barset);
17015 status = -ENOMEM;
17016 goto out;
17017 }
17018 db_offset = wq_create->u.response.doorbell_offset;
17019 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
17020 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
17021 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17022 "3252 WQ[%d] doorbell offset "
17023 "not supported: x%x\n",
17024 wq->queue_id, db_offset);
17025 status = -EINVAL;
17026 goto out;
17027 }
17028 wq->db_regaddr = bar_memmap_p + db_offset;
17029 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
17030 "3264 WQ[%d]: barset:x%x, offset:x%x, "
17031 "format:x%x\n", wq->queue_id,
17032 pci_barset, db_offset, wq->db_format);
17033 } else
17034 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
17035 } else {
17036 /* Check if DPP was honored by the firmware */
17037 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
17038 &wq_create->u.response_1);
17039 if (wq->dpp_enable) {
17040 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
17041 &wq_create->u.response_1);
17042 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
17043 pci_barset);
17044 if (!bar_memmap_p) {
17045 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17046 "3267 WQ[%d] failed to memmap "
17047 "pci barset:x%x\n",
17048 wq->queue_id, pci_barset);
17049 status = -ENOMEM;
17050 goto out;
17051 }
17052 db_offset = wq_create->u.response_1.doorbell_offset;
17053 wq->db_regaddr = bar_memmap_p + db_offset;
17054 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
17055 &wq_create->u.response_1);
17056 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
17057 &wq_create->u.response_1);
17058 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
17059 dpp_barset);
17060 if (!bar_memmap_p) {
17061 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17062 "3268 WQ[%d] failed to memmap "
17063 "pci barset:x%x\n",
17064 wq->queue_id, dpp_barset);
17065 status = -ENOMEM;
17066 goto out;
17067 }
17068 dpp_offset = wq_create->u.response_1.dpp_offset;
17069 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
17070 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
17071 "3271 WQ[%d]: barset:x%x, offset:x%x, "
17072 "dpp_id:x%x dpp_barset:x%x "
17073 "dpp_offset:x%x\n",
17074 wq->queue_id, pci_barset, db_offset,
17075 wq->dpp_id, dpp_barset, dpp_offset);
17076
17077 #ifdef CONFIG_X86
17078 /* Enable combined writes for DPP aperture */
17079 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
17080 rc = set_memory_wc(pg_addr, 1);
17081 if (rc) {
17082 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17083 "3272 Cannot setup Combined "
17084 "Write on WQ[%d] - disable DPP\n",
17085 wq->queue_id);
17086 phba->cfg_enable_dpp = 0;
17087 }
17088 #else
17089 phba->cfg_enable_dpp = 0;
17090 #endif
17091 } else
17092 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
17093 }
17094 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
17095 if (wq->pring == NULL) {
17096 status = -ENOMEM;
17097 goto out;
17098 }
17099 wq->type = LPFC_WQ;
17100 wq->assoc_qid = cq->queue_id;
17101 wq->subtype = subtype;
17102 wq->host_index = 0;
17103 wq->hba_index = 0;
17104 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
17105
17106 /* link the wq onto the parent cq child list */
17107 list_add_tail(&wq->list, &cq->child_list);
17108 out:
17109 mempool_free(mbox, phba->mbox_mem_pool);
17110 return status;
17111 }
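/*
 * Illustrative usage sketch (hypothetical caller; 'qp' is assumed to be a
 * struct lpfc_sli4_hdw_queue): a WQ is always bound to the CQ that will
 * receive its completions,
 *
 *	rc = lpfc_wq_create(phba, qp->io_wq, qp->io_cq, LPFC_IO);
 *
 * On return, wq->db_regaddr points at whichever doorbell applies - the
 * general WQ doorbell, a DUA BAR-mapped doorbell, or a DPP aperture - so
 * callers ring the queue through wq->db_regaddr rather than a fixed
 * register.
 */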
17112
17113 /**
17114 * lpfc_rq_create - Create a Receive Queue on the HBA
17115 * @phba: HBA structure that indicates port to create a queue on.
17116 * @hrq: The queue structure to use to create the header receive queue.
17117 * @drq: The queue structure to use to create the data receive queue.
17118 * @cq: The completion queue to bind these receive queues to.
17119 * @subtype: The subtype of the receive queues indicating their functionality.
17120 *
17121 * This function creates a receive buffer queue pair, as detailed in @hrq and
17122 * @drq, on a port, described by @phba, by sending a RQ_CREATE mailbox command
17123 * to the HBA.
17124 *
17125 * The @phba struct is used to send the mailbox command to the HBA. The @hrq
17126 * and @drq structs are used to get the entry count that is necessary to
17127 * determine the number of pages to use for each queue. The @cq indicates
17128 * which completion queue the buffers posted to these queues are bound to.
17129 * This function will send the RQ_CREATE mailbox command to the HBA to set up
17130 * the receive queue pair. This function is synchronous and will wait for the
17131 * mailbox command to finish before returning.
17132 *
17133 * On success this function returns zero. If unable to allocate enough
17134 * memory it returns -ENOMEM. If the queue create mailbox command
17135 * fails it returns -ENXIO.
17136 **/
17137 int
17138 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17139 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
17140 {
17141 struct lpfc_mbx_rq_create *rq_create;
17142 struct lpfc_dmabuf *dmabuf;
17143 LPFC_MBOXQ_t *mbox;
17144 int rc, length, status = 0;
17145 uint32_t shdr_status, shdr_add_status;
17146 union lpfc_sli4_cfg_shdr *shdr;
17147 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
17148 void __iomem *bar_memmap_p;
17149 uint32_t db_offset;
17150 uint16_t pci_barset;
17151
17152 /* sanity check on queue memory */
17153 if (!hrq || !drq || !cq)
17154 return -ENODEV;
17155 if (!phba->sli4_hba.pc_sli4_params.supported)
17156 hw_page_size = SLI4_PAGE_SIZE;
17157
17158 if (hrq->entry_count != drq->entry_count)
17159 return -EINVAL;
17160 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17161 if (!mbox)
17162 return -ENOMEM;
17163 length = (sizeof(struct lpfc_mbx_rq_create) -
17164 sizeof(struct lpfc_sli4_cfg_mhdr));
17165 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17166 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
17167 length, LPFC_SLI4_MBX_EMBED);
17168 rq_create = &mbox->u.mqe.un.rq_create;
17169 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
17170 bf_set(lpfc_mbox_hdr_version, &shdr->request,
17171 phba->sli4_hba.pc_sli4_params.rqv);
17172 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
17173 bf_set(lpfc_rq_context_rqe_count_1,
17174 &rq_create->u.request.context,
17175 hrq->entry_count);
17176 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
17177 bf_set(lpfc_rq_context_rqe_size,
17178 &rq_create->u.request.context,
17179 LPFC_RQE_SIZE_8);
17180 bf_set(lpfc_rq_context_page_size,
17181 &rq_create->u.request.context,
17182 LPFC_RQ_PAGE_SIZE_4096);
17183 } else {
17184 switch (hrq->entry_count) {
17185 default:
17186 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17187 "2535 Unsupported RQ count. (%d)\n",
17188 hrq->entry_count);
17189 if (hrq->entry_count < 512) {
17190 status = -EINVAL;
17191 goto out;
17192 }
17193 fallthrough; /* otherwise default to smallest count */
17194 case 512:
17195 bf_set(lpfc_rq_context_rqe_count,
17196 &rq_create->u.request.context,
17197 LPFC_RQ_RING_SIZE_512);
17198 break;
17199 case 1024:
17200 bf_set(lpfc_rq_context_rqe_count,
17201 &rq_create->u.request.context,
17202 LPFC_RQ_RING_SIZE_1024);
17203 break;
17204 case 2048:
17205 bf_set(lpfc_rq_context_rqe_count,
17206 &rq_create->u.request.context,
17207 LPFC_RQ_RING_SIZE_2048);
17208 break;
17209 case 4096:
17210 bf_set(lpfc_rq_context_rqe_count,
17211 &rq_create->u.request.context,
17212 LPFC_RQ_RING_SIZE_4096);
17213 break;
17214 }
17215 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
17216 LPFC_HDR_BUF_SIZE);
17217 }
17218 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
17219 cq->queue_id);
17220 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
17221 hrq->page_count);
17222 list_for_each_entry(dmabuf, &hrq->page_list, list) {
17223 memset(dmabuf->virt, 0, hw_page_size);
17224 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
17225 putPaddrLow(dmabuf->phys);
17226 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
17227 putPaddrHigh(dmabuf->phys);
17228 }
17229 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
17230 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
17231
17232 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17233 /* The IOCTL status is embedded in the mailbox subheader. */
17234 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17235 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17236 if (shdr_status || shdr_add_status || rc) {
17237 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17238 "2504 RQ_CREATE mailbox failed with "
17239 "status x%x add_status x%x, mbx status x%x\n",
17240 shdr_status, shdr_add_status, rc);
17241 status = -ENXIO;
17242 goto out;
17243 }
17244 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17245 if (hrq->queue_id == 0xFFFF) {
17246 status = -ENXIO;
17247 goto out;
17248 }
17249
17250 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
17251 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
17252 &rq_create->u.response);
17253 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
17254 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
17255 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17256 "3262 RQ [%d] doorbell format not "
17257 "supported: x%x\n", hrq->queue_id,
17258 hrq->db_format);
17259 status = -EINVAL;
17260 goto out;
17261 }
17262
17263 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
17264 &rq_create->u.response);
17265 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
17266 if (!bar_memmap_p) {
17267 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17268 "3269 RQ[%d] failed to memmap pci "
17269 "barset:x%x\n", hrq->queue_id,
17270 pci_barset);
17271 status = -ENOMEM;
17272 goto out;
17273 }
17274
17275 db_offset = rq_create->u.response.doorbell_offset;
17276 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
17277 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
17278 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17279 "3270 RQ[%d] doorbell offset not "
17280 "supported: x%x\n", hrq->queue_id,
17281 db_offset);
17282 status = -EINVAL;
17283 goto out;
17284 }
17285 hrq->db_regaddr = bar_memmap_p + db_offset;
17286 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
17287 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
17288 "format:x%x\n", hrq->queue_id, pci_barset,
17289 db_offset, hrq->db_format);
17290 } else {
17291 hrq->db_format = LPFC_DB_RING_FORMAT;
17292 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17293 }
17294 hrq->type = LPFC_HRQ;
17295 hrq->assoc_qid = cq->queue_id;
17296 hrq->subtype = subtype;
17297 hrq->host_index = 0;
17298 hrq->hba_index = 0;
17299 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17300
17301 /* now create the data queue */
17302 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17303 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
17304 length, LPFC_SLI4_MBX_EMBED);
17305 bf_set(lpfc_mbox_hdr_version, &shdr->request,
17306 phba->sli4_hba.pc_sli4_params.rqv);
17307 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
17308 bf_set(lpfc_rq_context_rqe_count_1,
17309 &rq_create->u.request.context, hrq->entry_count);
17310 if (subtype == LPFC_NVMET)
17311 rq_create->u.request.context.buffer_size =
17312 LPFC_NVMET_DATA_BUF_SIZE;
17313 else
17314 rq_create->u.request.context.buffer_size =
17315 LPFC_DATA_BUF_SIZE;
17316 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
17317 LPFC_RQE_SIZE_8);
17318 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
17319 (PAGE_SIZE/SLI4_PAGE_SIZE));
17320 } else {
17321 switch (drq->entry_count) {
17322 default:
17323 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17324 "2536 Unsupported RQ count. (%d)\n",
17325 drq->entry_count);
17326 if (drq->entry_count < 512) {
17327 status = -EINVAL;
17328 goto out;
17329 }
17330 fallthrough; /* otherwise default to smallest count */
17331 case 512:
17332 bf_set(lpfc_rq_context_rqe_count,
17333 &rq_create->u.request.context,
17334 LPFC_RQ_RING_SIZE_512);
17335 break;
17336 case 1024:
17337 bf_set(lpfc_rq_context_rqe_count,
17338 &rq_create->u.request.context,
17339 LPFC_RQ_RING_SIZE_1024);
17340 break;
17341 case 2048:
17342 bf_set(lpfc_rq_context_rqe_count,
17343 &rq_create->u.request.context,
17344 LPFC_RQ_RING_SIZE_2048);
17345 break;
17346 case 4096:
17347 bf_set(lpfc_rq_context_rqe_count,
17348 &rq_create->u.request.context,
17349 LPFC_RQ_RING_SIZE_4096);
17350 break;
17351 }
17352 if (subtype == LPFC_NVMET)
17353 bf_set(lpfc_rq_context_buf_size,
17354 &rq_create->u.request.context,
17355 LPFC_NVMET_DATA_BUF_SIZE);
17356 else
17357 bf_set(lpfc_rq_context_buf_size,
17358 &rq_create->u.request.context,
17359 LPFC_DATA_BUF_SIZE);
17360 }
17361 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
17362 cq->queue_id);
17363 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
17364 drq->page_count);
17365 list_for_each_entry(dmabuf, &drq->page_list, list) {
17366 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
17367 putPaddrLow(dmabuf->phys);
17368 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
17369 putPaddrHigh(dmabuf->phys);
17370 }
17371 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
17372 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
17373 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17374 /* The IOCTL status is embedded in the mailbox subheader. */
17375 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
17376 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17377 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17378 if (shdr_status || shdr_add_status || rc) {
17379 status = -ENXIO;
17380 goto out;
17381 }
17382 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17383 if (drq->queue_id == 0xFFFF) {
17384 status = -ENXIO;
17385 goto out;
17386 }
17387 drq->type = LPFC_DRQ;
17388 drq->assoc_qid = cq->queue_id;
17389 drq->subtype = subtype;
17390 drq->host_index = 0;
17391 drq->hba_index = 0;
17392 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17393
17394 /* link the header and data RQs onto the parent cq child list */
17395 list_add_tail(&hrq->list, &cq->child_list);
17396 list_add_tail(&drq->list, &cq->child_list);
17397
17398 out:
17399 mempool_free(mbox, phba->mbox_mem_pool);
17400 return status;
17401 }
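/*
 * Illustrative usage sketch (hypothetical caller): header and data RQs are
 * created as a pair against one CQ, for example for unsolicited ELS
 * frames:
 *
 *	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq,
 *			    phba->sli4_hba.dat_rq,
 *			    phba->sli4_hba.els_cq, LPFC_USOL);
 *
 * Both queues must have the same entry count (enforced above with
 * -EINVAL), since each received frame consumes one RQE from each queue.
 */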
17402
17403 /**
17404 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
17405 * @phba: HBA structure that indicates port to create a queue on.
17406 * @hrqp: The queue structure array to use to create the header receive queues.
17407 * @drqp: The queue structure array to use to create the data receive queues.
17408 * @cqp: The completion queue array to bind these receive queues to.
17409 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
17410 *
17411 * This function creates the receive buffer queue pairs, as detailed in @hrqp
17412 * and @drqp, on a port, described by @phba, by sending a RQ_CREATE mailbox
17413 * command to the HBA.
17414 *
17415 * The @phba struct is used to send the mailbox command to the HBA. The @hrqp
17416 * and @drqp arrays are used to get the entry counts that are necessary to
17417 * determine the number of pages to use for each queue. The @cqp array
17418 * indicates which completion queue the buffers posted to each queue pair are
17419 * bound to. This function will send the RQ_CREATE mailbox command to the HBA
17420 * to set up the receive queue pairs. This function is synchronous and will
17421 * wait for the mailbox command to finish before returning.
17422 *
17423 * On success this function returns zero. If unable to allocate enough
17424 * memory it returns -ENOMEM. If the queue create mailbox command
17425 * fails it returns -ENXIO.
17426 **/
17427 int
17428 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
17429 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
17430 uint32_t subtype)
17431 {
17432 struct lpfc_queue *hrq, *drq, *cq;
17433 struct lpfc_mbx_rq_create_v2 *rq_create;
17434 struct lpfc_dmabuf *dmabuf;
17435 LPFC_MBOXQ_t *mbox;
17436 int rc, length, alloclen, status = 0;
17437 int cnt, idx, numrq, page_idx = 0;
17438 uint32_t shdr_status, shdr_add_status;
17439 union lpfc_sli4_cfg_shdr *shdr;
17440 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
17441
17442 numrq = phba->cfg_nvmet_mrq;
17443 /* sanity check on array memory */
17444 if (!hrqp || !drqp || !cqp || !numrq)
17445 return -ENODEV;
17446 if (!phba->sli4_hba.pc_sli4_params.supported)
17447 hw_page_size = SLI4_PAGE_SIZE;
17448
17449 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17450 if (!mbox)
17451 return -ENOMEM;
17452
17453 length = sizeof(struct lpfc_mbx_rq_create_v2);
17454 length += ((2 * numrq * hrqp[0]->page_count) *
17455 sizeof(struct dma_address));
17456
17457 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17458 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
17459 LPFC_SLI4_MBX_NEMBED);
17460 if (alloclen < length) {
17461 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17462 "3099 Allocated DMA memory size (%d) is "
17463 "less than the requested DMA memory size "
17464 "(%d)\n", alloclen, length);
17465 status = -ENOMEM;
17466 goto out;
17467 }
17468
17471 rq_create = mbox->sge_array->addr[0];
17472 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
17473
17474 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
17475 cnt = 0;
17476
17477 for (idx = 0; idx < numrq; idx++) {
17478 hrq = hrqp[idx];
17479 drq = drqp[idx];
17480 cq = cqp[idx];
17481
17482 /* sanity check on queue memory */
17483 if (!hrq || !drq || !cq) {
17484 status = -ENODEV;
17485 goto out;
17486 }
17487
17488 if (hrq->entry_count != drq->entry_count) {
17489 status = -EINVAL;
17490 goto out;
17491 }
17492
17493 if (idx == 0) {
17494 bf_set(lpfc_mbx_rq_create_num_pages,
17495 &rq_create->u.request,
17496 hrq->page_count);
17497 bf_set(lpfc_mbx_rq_create_rq_cnt,
17498 &rq_create->u.request, (numrq * 2));
17499 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
17500 1);
17501 bf_set(lpfc_rq_context_base_cq,
17502 &rq_create->u.request.context,
17503 cq->queue_id);
17504 bf_set(lpfc_rq_context_data_size,
17505 &rq_create->u.request.context,
17506 LPFC_NVMET_DATA_BUF_SIZE);
17507 bf_set(lpfc_rq_context_hdr_size,
17508 &rq_create->u.request.context,
17509 LPFC_HDR_BUF_SIZE);
17510 bf_set(lpfc_rq_context_rqe_count_1,
17511 &rq_create->u.request.context,
17512 hrq->entry_count);
17513 bf_set(lpfc_rq_context_rqe_size,
17514 &rq_create->u.request.context,
17515 LPFC_RQE_SIZE_8);
17516 bf_set(lpfc_rq_context_page_size,
17517 &rq_create->u.request.context,
17518 (PAGE_SIZE/SLI4_PAGE_SIZE));
17519 }
17520 rc = 0;
17521 list_for_each_entry(dmabuf, &hrq->page_list, list) {
17522 memset(dmabuf->virt, 0, hw_page_size);
17523 cnt = page_idx + dmabuf->buffer_tag;
17524 rq_create->u.request.page[cnt].addr_lo =
17525 putPaddrLow(dmabuf->phys);
17526 rq_create->u.request.page[cnt].addr_hi =
17527 putPaddrHigh(dmabuf->phys);
17528 rc++;
17529 }
17530 page_idx += rc;
17531
17532 rc = 0;
17533 list_for_each_entry(dmabuf, &drq->page_list, list) {
17534 memset(dmabuf->virt, 0, hw_page_size);
17535 cnt = page_idx + dmabuf->buffer_tag;
17536 rq_create->u.request.page[cnt].addr_lo =
17537 putPaddrLow(dmabuf->phys);
17538 rq_create->u.request.page[cnt].addr_hi =
17539 putPaddrHigh(dmabuf->phys);
17540 rc++;
17541 }
17542 page_idx += rc;
17543
17544 hrq->db_format = LPFC_DB_RING_FORMAT;
17545 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17546 hrq->type = LPFC_HRQ;
17547 hrq->assoc_qid = cq->queue_id;
17548 hrq->subtype = subtype;
17549 hrq->host_index = 0;
17550 hrq->hba_index = 0;
17551 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17552
17553 drq->db_format = LPFC_DB_RING_FORMAT;
17554 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17555 drq->type = LPFC_DRQ;
17556 drq->assoc_qid = cq->queue_id;
17557 drq->subtype = subtype;
17558 drq->host_index = 0;
17559 drq->hba_index = 0;
17560 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17561
17562 list_add_tail(&hrq->list, &cq->child_list);
17563 list_add_tail(&drq->list, &cq->child_list);
17564 }
17565
17566 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17567 /* The IOCTL status is embedded in the mailbox subheader. */
17568 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17569 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17570 if (shdr_status || shdr_add_status || rc) {
17571 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17572 "3120 RQ_CREATE mailbox failed with "
17573 "status x%x add_status x%x, mbx status x%x\n",
17574 shdr_status, shdr_add_status, rc);
17575 status = -ENXIO;
17576 goto out;
17577 }
17578 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17579 if (rc == 0xFFFF) {
17580 status = -ENXIO;
17581 goto out;
17582 }
17583
17584 /* Initialize all RQs with associated queue id */
17585 for (idx = 0; idx < numrq; idx++) {
17586 hrq = hrqp[idx];
17587 hrq->queue_id = rc + (2 * idx);
17588 drq = drqp[idx];
17589 drq->queue_id = rc + (2 * idx) + 1;
17590 }
17591
17592 out:
17593 lpfc_sli4_mbox_cmd_free(phba, mbox);
17594 return status;
17595 }
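/*
 * Illustrative note on the id layout above: the non-embedded v2 RQ_CREATE
 * response returns one base queue id for the whole set, and the header and
 * data queues alternate from it. With numrq == 2 and a base id of 40:
 *
 *	hrqp[0]->queue_id = 40;  drqp[0]->queue_id = 41;
 *	hrqp[1]->queue_id = 42;  drqp[1]->queue_id = 43;
 */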
17596
17597 /**
17598 * lpfc_eq_destroy - Destroy an Event Queue on the HBA
17599 * @phba: HBA structure that indicates port to destroy a queue on.
17600 * @eq: The queue structure associated with the queue to destroy.
17601 *
17602 * This function destroys a queue, as detailed in @eq, by sending a mailbox
17603 * command, specific to the type of queue, to the HBA.
17604 *
17605 * The @eq struct is used to get the queue ID of the queue to destroy.
17606 *
17607 * On success this function returns zero. If the queue destroy mailbox
17608 * command fails it returns -ENXIO.
17609 **/
17610 int
17611 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
17612 {
17613 LPFC_MBOXQ_t *mbox;
17614 int rc, length, status = 0;
17615 uint32_t shdr_status, shdr_add_status;
17616 union lpfc_sli4_cfg_shdr *shdr;
17617
17618 /* sanity check on queue memory */
17619 if (!eq)
17620 return -ENODEV;
17621
17622 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
17623 if (!mbox)
17624 return -ENOMEM;
17625 length = (sizeof(struct lpfc_mbx_eq_destroy) -
17626 sizeof(struct lpfc_sli4_cfg_mhdr));
17627 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17628 LPFC_MBOX_OPCODE_EQ_DESTROY,
17629 length, LPFC_SLI4_MBX_EMBED);
17630 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
17631 eq->queue_id);
17632 mbox->vport = eq->phba->pport;
17633 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17634
17635 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
17636 /* The IOCTL status is embedded in the mailbox subheader. */
17637 shdr = (union lpfc_sli4_cfg_shdr *)
17638 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
17639 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17640 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17641 if (shdr_status || shdr_add_status || rc) {
17642 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17643 "2505 EQ_DESTROY mailbox failed with "
17644 "status x%x add_status x%x, mbx status x%x\n",
17645 shdr_status, shdr_add_status, rc);
17646 status = -ENXIO;
17647 }
17648
17649 /* Remove eq from any list */
17650 list_del_init(&eq->list);
17651 mempool_free(mbox, eq->phba->mbox_mem_pool);
17652 return status;
17653 }
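/*
 * Illustrative teardown-order sketch (an assumption drawn from the
 * parent/child lists built at create time, not a documented rule): since
 * CQs are linked onto eq->child_list and WQs onto cq->child_list, callers
 * destroy children before parents, e.g.
 *
 *	lpfc_wq_destroy(phba, qp->io_wq);
 *	lpfc_cq_destroy(phba, qp->io_cq);
 *	lpfc_eq_destroy(phba, qp->hba_eq);
 */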
17654
17655 /**
17656 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
17657 * @phba: HBA structure that indicates port to destroy a queue on.
17658 * @cq: The queue structure associated with the queue to destroy.
17659 *
17660 * This function destroys a queue, as detailed in @cq, by sending a mailbox
17661 * command, specific to the type of queue, to the HBA.
17662 *
17663 * The @cq struct is used to get the queue ID of the queue to destroy.
17664 *
17665 * On success this function returns zero. If the queue destroy mailbox
17666 * command fails it returns -ENXIO.
17667 **/
17668 int
17669 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
17670 {
17671 LPFC_MBOXQ_t *mbox;
17672 int rc, length, status = 0;
17673 uint32_t shdr_status, shdr_add_status;
17674 union lpfc_sli4_cfg_shdr *shdr;
17675
17676 /* sanity check on queue memory */
17677 if (!cq)
17678 return -ENODEV;
17679 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
17680 if (!mbox)
17681 return -ENOMEM;
17682 length = (sizeof(struct lpfc_mbx_cq_destroy) -
17683 sizeof(struct lpfc_sli4_cfg_mhdr));
17684 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17685 LPFC_MBOX_OPCODE_CQ_DESTROY,
17686 length, LPFC_SLI4_MBX_EMBED);
17687 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
17688 cq->queue_id);
17689 mbox->vport = cq->phba->pport;
17690 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17691 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
17692 /* The IOCTL status is embedded in the mailbox subheader. */
17693 shdr = (union lpfc_sli4_cfg_shdr *)
17694 &mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
17695 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17696 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17697 if (shdr_status || shdr_add_status || rc) {
17698 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17699 "2506 CQ_DESTROY mailbox failed with "
17700 "status x%x add_status x%x, mbx status x%x\n",
17701 shdr_status, shdr_add_status, rc);
17702 status = -ENXIO;
17703 }
17704 /* Remove cq from any list */
17705 list_del_init(&cq->list);
17706 mempool_free(mbox, cq->phba->mbox_mem_pool);
17707 return status;
17708 }
17709
17710 /**
17711 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
17712 * @phba: HBA structure that indicates port to destroy a queue on.
17713 * @mq: The queue structure associated with the queue to destroy.
17714 *
17715 * This function destroys a queue, as detailed in @mq, by sending a mailbox
17716 * command, specific to the type of queue, to the HBA.
17717 *
17718 * The @mq struct is used to get the queue ID of the queue to destroy.
17719 *
17720 * On success this function returns zero. If the queue destroy mailbox
17721 * command fails it returns -ENXIO.
17722 **/
17723 int
17724 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
17725 {
17726 LPFC_MBOXQ_t *mbox;
17727 int rc, length, status = 0;
17728 uint32_t shdr_status, shdr_add_status;
17729 union lpfc_sli4_cfg_shdr *shdr;
17730
17731 /* sanity check on queue memory */
17732 if (!mq)
17733 return -ENODEV;
17734 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
17735 if (!mbox)
17736 return -ENOMEM;
17737 length = (sizeof(struct lpfc_mbx_mq_destroy) -
17738 sizeof(struct lpfc_sli4_cfg_mhdr));
17739 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17740 LPFC_MBOX_OPCODE_MQ_DESTROY,
17741 length, LPFC_SLI4_MBX_EMBED);
17742 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
17743 mq->queue_id);
17744 mbox->vport = mq->phba->pport;
17745 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17746 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
17747 /* The IOCTL status is embedded in the mailbox subheader. */
17748 shdr = (union lpfc_sli4_cfg_shdr *)
17749 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
17750 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17751 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17752 if (shdr_status || shdr_add_status || rc) {
17753 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17754 "2507 MQ_DESTROY mailbox failed with "
17755 "status x%x add_status x%x, mbx status x%x\n",
17756 shdr_status, shdr_add_status, rc);
17757 status = -ENXIO;
17758 }
17759 /* Remove mq from any list */
17760 list_del_init(&mq->list);
17761 mempool_free(mbox, mq->phba->mbox_mem_pool);
17762 return status;
17763 }
17764
17765 /**
17766 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
17767 * @phba: HBA structure that indicates port to destroy a queue on.
17768 * @wq: The queue structure associated with the queue to destroy.
17769 *
17770 * This function destroys a queue, as detailed in @wq, by sending a mailbox
17771 * command, specific to the type of queue, to the HBA.
17772 *
17773 * The @wq struct is used to get the queue ID of the queue to destroy.
17774 *
17775 * On success this function returns zero. If the queue destroy mailbox
17776 * command fails it returns -ENXIO.
17777 **/
17778 int
17779 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
17780 {
17781 LPFC_MBOXQ_t *mbox;
17782 int rc, length, status = 0;
17783 uint32_t shdr_status, shdr_add_status;
17784 union lpfc_sli4_cfg_shdr *shdr;
17785
17786 /* sanity check on queue memory */
17787 if (!wq)
17788 return -ENODEV;
17789 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
17790 if (!mbox)
17791 return -ENOMEM;
17792 length = (sizeof(struct lpfc_mbx_wq_destroy) -
17793 sizeof(struct lpfc_sli4_cfg_mhdr));
17794 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17795 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
17796 length, LPFC_SLI4_MBX_EMBED);
17797 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
17798 wq->queue_id);
17799 mbox->vport = wq->phba->pport;
17800 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17801 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
17802 shdr = (union lpfc_sli4_cfg_shdr *)
17803 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
17804 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17805 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17806 if (shdr_status || shdr_add_status || rc) {
17807 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17808 "2508 WQ_DESTROY mailbox failed with "
17809 "status x%x add_status x%x, mbx status x%x\n",
17810 shdr_status, shdr_add_status, rc);
17811 status = -ENXIO;
17812 }
17813 /* Remove wq from any list */
17814 list_del_init(&wq->list);
17815 kfree(wq->pring);
17816 wq->pring = NULL;
17817 mempool_free(mbox, wq->phba->mbox_mem_pool);
17818 return status;
17819 }

/**
 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @hrq: The queue structure associated with the header receive queue.
 * @drq: The queue structure associated with the data receive queue.
 *
 * This function destroys the header and data receive queues detailed in
 * @hrq and @drq by sending a mailbox command, specific to the type of
 * queue, to the HBA.
 *
 * The @hrq and @drq structs are used to get the queue IDs of the queues
 * to destroy.
 *
 * On success this function will return zero. If a queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
		struct lpfc_queue *drq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!hrq || !drq)
		return -ENODEV;
	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       hrq->queue_id);
	mbox->vport = hrq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2509 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		mempool_free(mbox, hrq->phba->mbox_mem_pool);
		return -ENXIO;
	}
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       drq->queue_id);
	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2510 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	list_del_init(&hrq->list);
	list_del_init(&drq->list);
	mempool_free(mbox, hrq->phba->mbox_mem_pool);
	return status;
}
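
/*
 * Editorial note with a sketch (assumption, not driver code): unlike the
 * other destroy routines, lpfc_rq_destroy() reuses a single mailbox for two
 * RQ_DESTROY commands, rewriting only the queue id between issues:
 *
 *	bf_set(lpfc_mbx_rq_destroy_q_id, req, hrq->queue_id);
 *	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
 *	// check subheader status; free mailbox and return on error
 *	bf_set(lpfc_mbx_rq_destroy_q_id, req, drq->queue_id);
 *	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
 *	// check subheader status; fall through to list cleanup
 *
 * On a header-RQ failure the routine returns before the data RQ command is
 * ever issued, leaving both queues on the driver's lists.
 */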

/**
 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
 * @phba: The HBA structure on which this call is being executed.
 * @pdma_phys_addr0: Physical address of the 1st SGL page.
 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
 * @xritag: the xritag that ties this io to the SGL pages.
 *
 * This routine will post the sgl pages for the IO that has the xritag
 * that is in the iocbq structure. The xritag is assigned during iocbq
 * creation and persists for as long as the driver is loaded.
 * If the caller has fewer than 256 scatter gather segments to map then
 * pdma_phys_addr1 should be 0.
 * If the caller needs to map more than 256 scatter gather segments then
 * pdma_phys_addr1 should be a valid physical address.
 * Physical addresses for SGLs must be 64 byte aligned.
 * If two SGL pages are mapped, the first must have 256 entries and the
 * second can have between 1 and 256 entries.
 *
 * Return codes:
 *	0 - Success
 *	-ENXIO, -ENOMEM - Failure
 **/
int
lpfc_sli4_post_sgl(struct lpfc_hba *phba,
		   dma_addr_t pdma_phys_addr0,
		   dma_addr_t pdma_phys_addr1,
		   uint16_t xritag)
{
	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
	LPFC_MBOXQ_t *mbox;
	int rc;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;

	if (xritag == NO_XRI) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0364 Invalid param:\n");
		return -EINVAL;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
			 sizeof(struct lpfc_mbx_post_sgl_pages) -
			 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
				&mbox->u.mqe.un.post_sgl_pages;
	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)&post_sgl_pages->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (!phba->sli4_hba.intr_enable)
		mempool_free(mbox, phba->mbox_mem_pool);
	else if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2511 POST_SGL mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		return -ENXIO;
	}
	return 0;
}
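
/*
 * Editorial usage sketch (assumption, not driver code): posting the SGL of
 * a buffer whose scatter list spills onto a second page.  "buf" stands in
 * for a caller-owned struct lpfc_io_buf:
 *
 *	dma_addr_t pg0 = buf->dma_phys_sgl;
 *	dma_addr_t pg1 = (sgl_size > SGL_PAGE_SIZE) ?
 *			 buf->dma_phys_sgl + SGL_PAGE_SIZE : 0;
 *
 *	if (lpfc_sli4_post_sgl(phba, pg0, pg1, buf->cur_iocbq.sli4_xritag))
 *		buf->flags |= LPFC_SBUF_NOT_POSTED;
 *
 * The same pattern appears in the single-buffer fallback of
 * lpfc_sli4_post_io_sgl_list() below.
 */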

/**
 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the next available XRI from the
 * driver's XRI bitmask. Because the index is logical, the search starts
 * at bit 0 on every call.
 *
 * Returns
 *	An available logical xri in the range 0 <= xri < max_xri on success.
 *	NO_XRI if no xris are available.
 **/
static uint16_t
lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
{
	unsigned long xri;

	/*
	 * Fetch the next logical xri.  Because this index is logical,
	 * the driver starts at 0 each time.
	 */
	spin_lock_irq(&phba->hbalock);
	xri = find_first_zero_bit(phba->sli4_hba.xri_bmask,
				  phba->sli4_hba.max_cfg_param.max_xri);
	if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
		spin_unlock_irq(&phba->hbalock);
		return NO_XRI;
	} else {
		set_bit(xri, phba->sli4_hba.xri_bmask);
		phba->sli4_hba.max_cfg_param.xri_used++;
	}
	spin_unlock_irq(&phba->hbalock);
	return xri;
}
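
/*
 * Editorial sketch (assumption, not driver code): the value returned above
 * is a logical index into the driver's bitmask; the physical XRI that the
 * port understands comes from the xri_ids[] table, e.g.:
 *
 *	uint16_t lxri = lpfc_sli4_alloc_xri(phba);
 *	uint16_t xri;
 *
 *	if (lxri == NO_XRI)
 *		return -ENOMEM;
 *	xri = phba->sli4_hba.xri_ids[lxri];
 *
 * lpfc_sli4_xri_inrange() further down performs the reverse lookup, from a
 * physical XRI back to the logical index.
 */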

/**
 * __lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver.
 **/
static void
__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask))
		phba->sli4_hba.max_cfg_param.xri_used--;
}

/**
 * lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver.
 **/
void
lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_xri(phba, xri);
	spin_unlock_irq(&phba->hbalock);
}
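
/*
 * Editorial usage sketch (assumption, not driver code): the two release
 * variants differ only in locking -- lpfc_sli4_free_xri() takes hbalock
 * itself, while the double-underscore form expects the caller to hold it:
 *
 *	uint16_t xri = lpfc_sli4_alloc_xri(phba);	// locks internally
 *
 *	lpfc_sli4_free_xri(phba, xri);			// locks internally
 *
 *	spin_lock_irq(&phba->hbalock);
 *	__lpfc_sli4_free_xri(phba, xri);		// caller holds lock
 *	spin_unlock_irq(&phba->hbalock);
 */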

/**
 * lpfc_sli4_next_xritag - Get an xritag for the io
 * @phba: Pointer to HBA context object.
 *
 * This function gets an xritag for the iocb. It returns the allocated
 * xritag on success; if no xritag is unused it logs a warning and returns
 * NO_XRI (0xffff), which is not a valid xritag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba *phba)
{
	uint16_t xri_index;

	xri_index = lpfc_sli4_alloc_xri(phba);
	if (xri_index == NO_XRI)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2004 Failed to allocate XRI. Last XRITAG "
				"is %d, Max XRI is %d, Used XRI is %d\n",
				xri_index,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.xri_used);
	return xri_index;
}

/**
 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
 * @phba: pointer to lpfc hba data structure.
 * @post_sgl_list: pointer to els sgl entry list.
 * @post_cnt: number of els sgl entries on the list.
 *
 * This routine is invoked to post a block of the driver's sgl pages to the
 * HBA using a non-embedded mailbox command. No lock is held. This routine
 * is only called when the driver is loading and after all IO has been
 * stopped.
 **/
static int
lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
			struct list_head *post_sgl_list,
			int post_cnt)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2559 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
		return -ENOMEM;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
				    reqlen, LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0285 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	/* Set up the SGL pages in the non-embedded DMA pages */
	viraddr = mbox->sge_array->addr[0];
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(0));
		sgl_pg_pairs->sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(0));

		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = sglq_entry->sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}

	/* Complete initialization and perform endian conversion. */
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (!phba->sli4_hba.intr_enable)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	else if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2513 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
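
/*
 * Editorial worked example (assuming a 4096-byte SLI4_PAGE_SIZE): the
 * one-page check above bounds how many sgl page pairs fit in a single
 * non-embedded POST_SGL_PAGES command.  Since
 *
 *	reqlen = post_cnt * sizeof(struct sgl_page_pairs)
 *		 + sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t),
 *
 * post_cnt can be at most
 *
 *	(4096 - sizeof(union lpfc_sli4_cfg_shdr) - 4)
 *		/ sizeof(struct sgl_page_pairs)
 *
 * entries; a larger request is rejected with -ENOMEM before any mailbox
 * is allocated.
 */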

/**
 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware
 * @phba: pointer to lpfc hba data structure.
 * @nblist: pointer to nvme buffer list.
 * @count: number of nvme buffers on the list.
 *
 * This routine is invoked to post a block of @count nvme buffer sgl pages
 * from the buffer list @nblist to the HBA using a non-embedded mailbox
 * command. No lock is held.
 **/
static int
lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
			    int count)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	dma_addr_t pdma_phys_bpl1;
	union lpfc_sli4_cfg_shdr *shdr;

	/* Calculate the requested length of the dma memory */
	reqlen = count * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"6118 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6119 Failed to allocate mbox cmd memory\n");
		return -ENOMEM;
	}

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
				    reqlen, LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6120 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}

	/* Get the first SGE entry from the non-embedded DMA memory */
	viraddr = mbox->sge_array->addr[0];

	/* Set up the SGL pages in the non-embedded DMA pages */
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry(lpfc_ncmd, nblist, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
			cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
		sgl_pg_pairs->sgl_pg0_addr_hi =
			cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
					 SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		sgl_pg_pairs->sgl_pg1_addr_lo =
			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
		sgl_pg_pairs->sgl_pg1_addr_hi =
			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
	/* Perform endian conversion if necessary */
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable) {
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	} else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (!phba->sli4_hba.intr_enable)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	else if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6125 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}

/**
 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_nblist: pointer to the nvme buffer list.
 * @sb_count: number of nvme buffers.
 *
 * This routine walks a list of nvme buffers that was passed in. It attempts
 * to construct blocks of nvme buffer sgls which contain contiguous xris and
 * uses the non-embedded SGL block post mailbox commands to post to the port.
 * For a single NVME buffer sgl with a non-contiguous xri, if any, it uses
 * the embedded SGL post mailbox command for posting. The @post_nblist passed
 * in must be a local list, so no lock is needed when manipulating it.
 *
 * Returns: 0 = failure, non-zero number of successfully posted buffers.
 **/
int
lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
			   struct list_head *post_nblist, int sb_count)
{
	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
	int status, sgl_size;
	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
	dma_addr_t pdma_phys_sgl1;
	int last_xritag = NO_XRI;
	int cur_xritag;
	LIST_HEAD(prep_nblist);
	LIST_HEAD(blck_nblist);
	LIST_HEAD(nvme_nblist);

	/* sanity check */
	if (sb_count <= 0)
		return -EINVAL;

	sgl_size = phba->cfg_sg_dma_buf_size;
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
		list_del_init(&lpfc_ncmd->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_nblist, &blck_nblist);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_nblist, &blck_nblist);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posting++;
		last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;

		/* end of repost sgl list condition for NVME buffers */
		if (num_posting == sb_count) {
			if (post_cnt == 0) {
				/* last sgl posting block */
				list_splice_init(&prep_nblist, &blck_nblist);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* last single sgl with non-contiguous xri */
				if (sgl_size > SGL_PAGE_SIZE)
					pdma_phys_sgl1 =
						lpfc_ncmd->dma_phys_sgl +
						SGL_PAGE_SIZE;
				else
					pdma_phys_sgl1 = 0;
				cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
				status = lpfc_sli4_post_sgl(
						phba, lpfc_ncmd->dma_phys_sgl,
						pdma_phys_sgl1, cur_xritag);
				if (status) {
					/* Post error. Buffer unavailable. */
					lpfc_ncmd->flags |=
						LPFC_SBUF_NOT_POSTED;
				} else {
					/* Post success. Buffer available. */
					lpfc_ncmd->flags &=
						~LPFC_SBUF_NOT_POSTED;
					lpfc_ncmd->status = IOSTAT_SUCCESS;
					num_posted++;
				}
				/* put on NVME buffer sgl list */
				list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post block of NVME buffer list sgls */
		status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
						     post_cnt);

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset NVME buffer post count for next round of posting */
		post_cnt = 0;

		/* put posted NVME buffers on the NVME buffer sgl list */
		while (!list_empty(&blck_nblist)) {
			list_remove_head(&blck_nblist, lpfc_ncmd,
					 struct lpfc_io_buf, list);
			if (status) {
				/* Post error. Mark buffer unavailable. */
				lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
			} else {
				/* Post success. Mark buffer available. */
				lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
				lpfc_ncmd->status = IOSTAT_SUCCESS;
				num_posted++;
			}
			list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
		}
	}
	/* Push NVME buffers with sgl posted to the available list */
	lpfc_io_buf_replenish(phba, &nvme_nblist);

	return num_posted;
}
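
/*
 * Editorial walk-through (assumption): given buffers with xritags
 * 10, 11, 12, 14, 15, the loop above gathers {10, 11, 12} into blck_nblist
 * when it sees the hole before 14 (post_cnt = block_cnt - 1), restarts
 * prep_nblist with 14, and posts {14, 15} when the end-of-list condition
 * fires.  Note that a hole does not reset last_xritag to NO_XRI -- only a
 * posted full-size block does -- so back-to-back holes still split into
 * correctly contiguous blocks.
 */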

/**
 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
 * @phba: pointer to lpfc_hba struct that the frame was received on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function checks the fields in the @fc_hdr to see if the FC frame is a
 * valid type of frame that the LPFC driver will handle. This function will
 * return a zero if the frame is a valid frame or a non zero value when the
 * frame does not pass the check.
 **/
static int
lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
{
	struct fc_vft_header *fc_vft_hdr;
	uint32_t *header = (uint32_t *) fc_hdr;

#define FC_RCTL_MDS_DIAGS	0xF4

	switch (fc_hdr->fh_r_ctl) {
	case FC_RCTL_DD_UNCAT:		/* uncategorized information */
	case FC_RCTL_DD_SOL_DATA:	/* solicited data */
	case FC_RCTL_DD_UNSOL_CTL:	/* unsolicited control */
	case FC_RCTL_DD_SOL_CTL:	/* solicited control or reply */
	case FC_RCTL_DD_UNSOL_DATA:	/* unsolicited data */
	case FC_RCTL_DD_DATA_DESC:	/* data descriptor */
	case FC_RCTL_DD_UNSOL_CMD:	/* unsolicited command */
	case FC_RCTL_DD_CMD_STATUS:	/* command status */
	case FC_RCTL_ELS_REQ:		/* extended link services request */
	case FC_RCTL_ELS_REP:		/* extended link services reply */
	case FC_RCTL_ELS4_REQ:		/* FC-4 ELS request */
	case FC_RCTL_ELS4_REP:		/* FC-4 ELS reply */
	case FC_RCTL_BA_ABTS:		/* basic link service abort */
	case FC_RCTL_BA_RMC:		/* remove connection */
	case FC_RCTL_BA_ACC:		/* basic accept */
	case FC_RCTL_BA_RJT:		/* basic reject */
	case FC_RCTL_BA_PRMT:
	case FC_RCTL_ACK_1:		/* acknowledge_1 */
	case FC_RCTL_ACK_0:		/* acknowledge_0 */
	case FC_RCTL_P_RJT:		/* port reject */
	case FC_RCTL_F_RJT:		/* fabric reject */
	case FC_RCTL_P_BSY:		/* port busy */
	case FC_RCTL_F_BSY:		/* fabric busy to data frame */
	case FC_RCTL_F_BSYL:		/* fabric busy to link control frame */
	case FC_RCTL_LCR:		/* link credit reset */
	case FC_RCTL_MDS_DIAGS:		/* MDS Diagnostics */
	case FC_RCTL_END:		/* end */
		break;
	case FC_RCTL_VFTH:		/* Virtual Fabric tagging Header */
		fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
		fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
		return lpfc_fc_frame_check(phba, fc_hdr);
	case FC_RCTL_BA_NOP:		/* basic link service NOP */
	default:
		goto drop;
	}

	switch (fc_hdr->fh_type) {
	case FC_TYPE_BLS:
	case FC_TYPE_ELS:
	case FC_TYPE_FCP:
	case FC_TYPE_CT:
	case FC_TYPE_NVME:
		break;
	case FC_TYPE_IP:
	case FC_TYPE_ILS:
	default:
		goto drop;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"2538 Received frame rctl:x%x, type:x%x, "
			"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
			fc_hdr->fh_r_ctl, fc_hdr->fh_type,
			be32_to_cpu(header[0]), be32_to_cpu(header[1]),
			be32_to_cpu(header[2]), be32_to_cpu(header[3]),
			be32_to_cpu(header[4]), be32_to_cpu(header[5]),
			be32_to_cpu(header[6]));
	return 0;
drop:
	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
			"2539 Dropped frame rctl:x%x type:x%x\n",
			fc_hdr->fh_r_ctl, fc_hdr->fh_type);
	return 1;
}

/**
 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function processes the FC header to retrieve the VFI from the VF
 * header, if one exists. It returns the VFI if a VFT header is present,
 * or 0 if the frame carries no VFT header.
 **/
static uint32_t
lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
{
	struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;

	if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
		return 0;
	return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
}

/**
 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
 * @phba: Pointer to the HBA structure to search for the vport on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 * @fcfi: The FC Fabric ID that the frame came from
 * @did: Destination ID to match against
 *
 * This function searches the @phba for a vport that matches the content of the
 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
 * returns the matching vport pointer or NULL if unable to match frame to a
 * vport.
 **/
static struct lpfc_vport *
lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
		       uint16_t fcfi, uint32_t did)
{
	struct lpfc_vport **vports;
	struct lpfc_vport *vport = NULL;
	int i;

	if (did == Fabric_DID)
		return phba->pport;
	if ((phba->pport->fc_flag & FC_PT2PT) &&
	    !(phba->link_state == LPFC_HBA_READY))
		return phba->pport;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			if (phba->fcf.fcfi == fcfi &&
			    vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
			    vports[i]->fc_myDID == did) {
				vport = vports[i];
				break;
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
	return vport;
}

/**
 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
 * @vport: The vport to work on.
 *
 * This function updates the receive sequence time stamp for this vport. The
 * receive sequence time stamp indicates the time that the last frame of the
 * sequence that has been idle for the longest amount of time was received.
 * The driver uses this time stamp to indicate if any received sequences have
 * timed out.
 **/
static void
lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *dmabuf = NULL;

	/* get the oldest sequence on the rcv list */
	h_buf = list_get_first(&vport->rcv_buffer_list,
			       struct lpfc_dmabuf, list);
	if (!h_buf)
		return;
	dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
	vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
}

/**
 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function cleans up all outstanding received sequences. This is called
 * by the driver when a link event or user action invalidates all the received
 * sequences.
 **/
void
lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;

	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
}

/**
 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function determines whether any received sequences have timed out by
 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
 * indicates that there is at least one timed out sequence this routine will
 * go through the received sequences one at a time from most inactive to most
 * active to determine which ones need to be cleaned up. Once it has determined
 * that a sequence needs to be cleaned up it will simply free up the resources
 * without sending an abort.
 **/
void
lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;
	unsigned long timeout;
	int abort_count = 0;

	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
		   vport->rcv_buffer_time_stamp);
	if (list_empty(&vport->rcv_buffer_list) ||
	    time_before(jiffies, timeout))
		return;
	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
			   dmabuf->time_stamp);
		if (time_before(jiffies, timeout))
			break;
		abort_count++;
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
	if (abort_count)
		lpfc_update_rcv_time_stamp(vport);
}
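
/*
 * Editorial sketch (assumption, not driver code): the staleness test above
 * is ordinary jiffies arithmetic -- a sequence times out once E_D_TOV
 * milliseconds have passed since its newest frame:
 *
 *	unsigned long deadline = dmabuf->time_stamp +
 *				 msecs_to_jiffies(vport->phba->fc_edtov);
 *
 *	if (time_before(jiffies, deadline))
 *		break;	// list is oldest-first; the rest are newer
 *
 * The oldest-first ordering is maintained by lpfc_fc_frame_add(), which
 * moves a sequence to the list tail whenever a new frame arrives for it.
 */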

/**
 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
 *
 * This function searches through the existing incomplete sequences that have
 * been sent to this @vport. If the frame matches one of the incomplete
 * sequences then the dbuf in the @dmabuf is added to the list of frames that
 * make up that sequence. If no sequence is found that matches this frame then
 * the function will add the hbuf in the @dmabuf to the @vport's
 * rcv_buffer_list. This function returns a pointer to the first dmabuf in
 * the sequence list that the frame was linked to.
 **/
static struct hbq_dmabuf *
lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf;
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;
	struct hbq_dmabuf *temp_dmabuf = NULL;
	uint8_t	found = 0;

	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	dmabuf->time_stamp = jiffies;
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;

	/* Use the hdr_buf to find the sequence that this frame belongs to */
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}
	if (!seq_dmabuf) {
		/*
		 * This indicates first frame received for this sequence.
		 * Queue the buffer on the vport's rcv_buffer_list.
		 */
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	temp_hdr = seq_dmabuf->hbuf.virt;
	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
	    be16_to_cpu(temp_hdr->fh_seq_cnt)) {
		list_del_init(&seq_dmabuf->hbuf.list);
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	/* move this sequence to the tail to indicate a young sequence */
	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
	seq_dmabuf->time_stamp = jiffies;
	lpfc_update_rcv_time_stamp(vport);
	if (list_empty(&seq_dmabuf->dbuf.list)) {
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		return seq_dmabuf;
	}
	/* find the correct place in the sequence to insert this frame */
	d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
	while (!found) {
		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
		/*
		 * If the frame's sequence count is greater than the frame on
		 * the list then insert the frame right after this frame
		 */
		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
		    be16_to_cpu(temp_hdr->fh_seq_cnt)) {
			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
			found = 1;
			break;
		}

		if (&d_buf->list == &seq_dmabuf->dbuf.list)
			break;
		d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
	}

	if (found)
		return seq_dmabuf;
	return NULL;
}
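
/*
 * Editorial note (assumption): frames usually arrive in order, so the
 * insertion search above starts at the tail (the highest SEQ_CNT linked so
 * far) and walks backwards.  Inserting SEQ_CNT 5 into a list holding
 * 0, 1, 2, 6 touches only the 6 and 2 entries before linking after 2.  A
 * duplicate SEQ_CNT never satisfies the strictly-greater test, so the walk
 * ends without a match and the frame is rejected by returning NULL.
 */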

/**
 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the partially assembled sequence described
 * by the information in the basic abort @dmabuf. It checks whether such a
 * partially assembled sequence is held by the driver. If so, it frees up
 * all the frames from the partially assembled sequence.
 *
 * Return
 * true -- if there is a matching partially assembled sequence present and
 *         all the frames were freed with the sequence;
 * false -- if there is no matching partially assembled sequence present so
 *          nothing got aborted in the lower layer driver
 **/
static bool
lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
			    struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;

	/* Use the hdr_buf to find the sequence that matches this frame */
	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	INIT_LIST_HEAD(&dmabuf->hbuf.list);
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}

	/* Free up all the frames from the partially assembled sequence */
	if (seq_dmabuf) {
		list_for_each_entry_safe(d_buf, n_buf,
					 &seq_dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		return true;
	}
	return false;
}

/**
 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the sequence, already assembled and handed
 * to the upper level protocol, described by the information in the basic
 * abort @dmabuf. It checks whether such a pending context exists at the
 * upper level protocol. If so, it cleans up the pending context.
 *
 * Return
 * true -- if there is a matching pending context of the sequence cleaned
 *         at ulp;
 * false -- if there is no matching pending context of the sequence present
 *          at ulp.
 **/
static bool
lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	int handled;

	/* Accepting abort at ulp with SLI4 only */
	if (phba->sli_rev < LPFC_SLI_REV4)
		return false;

	/* Register all caring upper level protocols to attend abort */
	handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
	if (handled)
		return true;

	return false;
}

/**
 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
 * @phba: Pointer to HBA context object.
 * @cmd_iocbq: pointer to the command iocbq structure.
 * @rsp_iocbq: pointer to the response iocbq structure.
 *
 * This function handles the sequence abort response iocb command complete
 * event. It properly releases the memory allocated to the sequence abort
 * accept iocb.
 **/
static void
lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
			     struct lpfc_iocbq *cmd_iocbq,
			     struct lpfc_iocbq *rsp_iocbq)
{
	if (cmd_iocbq) {
		lpfc_nlp_put(cmd_iocbq->ndlp);
		lpfc_sli_release_iocbq(phba, cmd_iocbq);
	}

	/* Failure means BLS ABORT RSP did not get delivered to remote node */
	if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3154 BLS ABORT RSP failed, data: x%x/x%x\n",
				get_job_ulpstatus(phba, rsp_iocbq),
				get_job_word4(phba, rsp_iocbq));
}

/**
 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
 * @phba: Pointer to HBA context object.
 * @xri: xri id in transaction.
 *
 * This function validates that the xri maps to the known range of XRIs
 * allocated and used by the driver.
 **/
uint16_t
lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
		      uint16_t xri)
{
	uint16_t i;

	for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
		if (xri == phba->sli4_hba.xri_ids[i])
			return i;
	}
	return NO_XRI;
}

/**
 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
 * @vport: pointer to a virtual port.
 * @fc_hdr: pointer to a FC frame header.
 * @aborted: was the partially assembled receive sequence successfully aborted
 *
 * This function sends a basic response to a previous unsol sequence abort
 * event after aborting the sequence handling.
 **/
void
lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
			struct fc_frame_header *fc_hdr, bool aborted)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *ctiocb = NULL;
	struct lpfc_nodelist *ndlp;
	uint16_t oxid, rxid, xri, lxri;
	uint32_t sid, fctl;
	union lpfc_wqe128 *icmd;
	int rc;

	if (!lpfc_is_link_up(phba))
		return;

	sid = sli4_sid_from_fc_hdr(fc_hdr);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr->fh_rx_id);

	ndlp = lpfc_findnode_did(vport, sid);
	if (!ndlp) {
		ndlp = lpfc_nlp_init(vport, sid);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
					 "1268 Failed to allocate ndlp for "
					 "oxid:x%x SID:x%x\n", oxid, sid);
			return;
		}
		/* Put ndlp onto pport node list */
		lpfc_enqueue_node(vport, ndlp);
	}

	/* Allocate buffer for rsp iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb)
		return;

	icmd = &ctiocb->wqe;

	/* Extract the F_CTL field from FC_HDR */
	fctl = sli4_fctl_from_fc_hdr(fc_hdr);

	ctiocb->ndlp = lpfc_nlp_get(ndlp);
	if (!ctiocb->ndlp) {
		lpfc_sli_release_iocbq(phba, ctiocb);
		return;
	}

	ctiocb->vport = phba->pport;
	ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
	ctiocb->sli4_lxritag = NO_XRI;
	ctiocb->sli4_xritag = NO_XRI;
	ctiocb->abort_rctl = FC_RCTL_BA_ACC;

	if (fctl & FC_FC_EX_CTX)
		/* Exchange responder sent the abort so we
		 * own the oxid.
		 */
		xri = oxid;
	else
		xri = rxid;
	lxri = lpfc_sli4_xri_inrange(phba, xri);
	if (lxri != NO_XRI)
		lpfc_set_rrq_active(phba, ndlp, lxri,
				    (xri == oxid) ? rxid : oxid, 0);
	/* For BA_ABTS from exchange responder, if the logical xri with
	 * the oxid maps to the FCP XRI range, the port no longer has
	 * that exchange context, send a BLS_RJT. Override the IOCB for
	 * a BA_RJT.
	 */
	if ((fctl & FC_FC_EX_CTX) &&
	    (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
		ctiocb->abort_rctl = FC_RCTL_BA_RJT;
		bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
		bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
		       FC_BA_RJT_INV_XID);
		bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
		       FC_BA_RJT_UNABLE);
	}

	/* If BA_ABTS failed to abort a partially assembled receive sequence,
	 * the driver no longer has that exchange, send a BLS_RJT. Override
	 * the IOCB for a BA_RJT.
	 */
	if (!aborted) {
		ctiocb->abort_rctl = FC_RCTL_BA_RJT;
		bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
		bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
		       FC_BA_RJT_INV_XID);
		bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
		       FC_BA_RJT_UNABLE);
	}

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS sent by responder to CT exchange, construction
		 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
		 * field and RX_ID from ABTS for RX_ID field.
		 */
		ctiocb->abort_bls = LPFC_ABTS_UNSOL_RSP;
		bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
	} else {
		/* ABTS sent by initiator to CT exchange, construction
		 * of BA_ACC will need to allocate a new XRI as for the
		 * XRI_TAG field.
		 */
		ctiocb->abort_bls = LPFC_ABTS_UNSOL_INT;
	}

	/* OX_ID is invariable regardless of who sent the ABTS to the CT
	 * exchange.
	 */
	bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, oxid);
	bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);

	/* Use CT=VPI */
	bf_set(wqe_els_did, &icmd->xmit_bls_rsp.wqe_dest,
	       ndlp->nlp_DID);
	bf_set(xmit_bls_rsp64_temprpi, &icmd->xmit_bls_rsp,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_cmnd, &icmd->generic.wqe_com, CMD_XMIT_BLS_RSP64_CX);

	/* Xmit CT abts response on exchange <xid> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
			 ctiocb->abort_rctl, oxid, phba->link_state);

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2925 Failed to issue CT ABTS RSP x%x on "
				 "xri x%x, Data x%x\n",
				 ctiocb->abort_rctl, oxid,
				 phba->link_state);
		lpfc_nlp_put(ndlp);
		ctiocb->ndlp = NULL;
		lpfc_sli_release_iocbq(phba, ctiocb);
	}
}

/**
 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
 * @vport: Pointer to the vport on which this sequence was received
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
 * receive sequence is only partially assembled by the driver, it aborts the
 * partially assembled frames for the sequence. Otherwise, if the unsolicited
 * receive sequence has been completely assembled and passed to the Upper
 * Layer Protocol (ULP), it marks the per-oxid status to indicate that the
 * unsolicited sequence has been aborted. After that, it issues a basic
 * accept to accept the abort.
 **/
static void
lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
			     struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	struct fc_frame_header fc_hdr;
	uint32_t fctl;
	bool aborted;

	/* Make a copy of fc_hdr before the dmabuf being released */
	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS by responder to exchange, no cleanup needed */
		aborted = true;
	} else {
		/* ABTS by initiator to exchange, need to do cleanup */
		aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
		if (!aborted)
			aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
	}
	lpfc_in_buf_free(phba, &dmabuf->dbuf);

	if (phba->nvmet_support) {
		lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
		return;
	}

	/* Respond with BA_ACC or BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
}

/**
 * lpfc_seq_complete - Indicates if a sequence is complete
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function checks the sequence, starting with the frame described by
 * @dmabuf, to see if all the frames associated with this sequence are present.
 * The frames associated with this sequence are linked to the @dmabuf using the
 * dbuf list. This function looks for three things: 1) that the first frame
 * has a sequence count of zero; 2) that there is a frame with the last frame
 * of sequence bit set; 3) that there are no holes in the sequence count. The
 * function returns 1 when the sequence is complete, otherwise it returns 0.
 **/
static int
lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *hdr;
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *seq_dmabuf;
	uint32_t fctl;
	int seq_count = 0;

	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* make sure first frame of sequence has a sequence count of zero */
	if (hdr->fh_seq_cnt != seq_count)
		return 0;
	fctl = (hdr->fh_f_ctl[0] << 16 |
		hdr->fh_f_ctl[1] << 8 |
		hdr->fh_f_ctl[2]);
	/* If last frame of sequence we can return success. */
	if (fctl & FC_FC_END_SEQ)
		return 1;
	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
		/* If there is a hole in the sequence count then fail. */
		if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
			return 0;
		fctl = (hdr->fh_f_ctl[0] << 16 |
			hdr->fh_f_ctl[1] << 8 |
			hdr->fh_f_ctl[2]);
		/* If last frame of sequence we can return success. */
		if (fctl & FC_FC_END_SEQ)
			return 1;
	}
	return 0;
}
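
/*
 * Editorial sketch (assumption): F_CTL is a 24-bit field carried as three
 * big-endian bytes, hence the reconstruction used twice above:
 *
 *	fctl = (hdr->fh_f_ctl[0] << 16) |
 *	       (hdr->fh_f_ctl[1] << 8) |
 *		hdr->fh_f_ctl[2];
 *
 * FC_FC_END_SEQ in that word marks the final frame, so completeness
 * requires a first frame with SEQ_CNT 0, no gaps in the counts, and an
 * END_SEQ frame somewhere on the list.
 */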

/**
 * lpfc_prep_seq - Prep sequence for ULP processing
 * @vport: Pointer to the vport on which this sequence was received
 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function takes a sequence, described by a list of frames, and creates
 * a list of iocbq structures to describe the sequence. This iocbq list will be
 * used to issue to the generic unsolicited sequence handler. This routine
 * returns a pointer to the first iocbq in the list. If the function is unable
 * to allocate an iocbq then it throws out the received frames that could not
 * be described and returns a pointer to the first iocbq. If unable to
 * allocate any iocbqs (including the first) this function will return NULL.
 **/
static struct lpfc_iocbq *
lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
{
	struct hbq_dmabuf *hbq_buf;
	struct lpfc_dmabuf *d_buf, *n_buf;
	struct lpfc_iocbq *first_iocbq, *iocbq;
	struct fc_frame_header *fc_hdr;
	uint32_t sid;
	uint32_t len, tot_len;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	/* remove from receive buffer list */
	list_del_init(&seq_dmabuf->hbuf.list);
	lpfc_update_rcv_time_stamp(vport);
	/* get the Remote Port's SID */
	sid = sli4_sid_from_fc_hdr(fc_hdr);
	tot_len = 0;
	/* Get an iocbq struct to fill in. */
	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
	if (first_iocbq) {
		/* Initialize the first IOCB. */
		first_iocbq->wcqe_cmpl.total_data_placed = 0;
		bf_set(lpfc_wcqe_c_status, &first_iocbq->wcqe_cmpl,
		       IOSTAT_SUCCESS);
		first_iocbq->vport = vport;

		/* Check FC Header to see what TYPE of frame we are rcv'ing */
		if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
			bf_set(els_rsp64_sid, &first_iocbq->wqe.xmit_els_rsp,
			       sli4_did_from_fc_hdr(fc_hdr));
		}

		bf_set(wqe_ctxt_tag, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
		       NO_XRI);
		bf_set(wqe_rcvoxid, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
		       be16_to_cpu(fc_hdr->fh_ox_id));

		/* put the first buffer into the first iocb */
		tot_len = bf_get(lpfc_rcqe_length,
				 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);

		first_iocbq->cmd_dmabuf = &seq_dmabuf->dbuf;
		first_iocbq->bpl_dmabuf = NULL;
		/* Keep track of the BDE count */
		first_iocbq->wcqe_cmpl.word3 = 1;

		if (tot_len > LPFC_DATA_BUF_SIZE)
			first_iocbq->wqe.gen_req.bde.tus.f.bdeSize =
				LPFC_DATA_BUF_SIZE;
		else
			first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = tot_len;

		first_iocbq->wcqe_cmpl.total_data_placed = tot_len;
		bf_set(wqe_els_did, &first_iocbq->wqe.xmit_els_rsp.wqe_dest,
		       sid);
	}
	iocbq = first_iocbq;
	/*
	 * Each IOCBq can have two Buffers assigned, so go through the list
	 * of buffers for this sequence and save two buffers in each IOCBq
	 */
	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
		if (!iocbq) {
			lpfc_in_buf_free(vport->phba, d_buf);
			continue;
		}
		if (!iocbq->bpl_dmabuf) {
			iocbq->bpl_dmabuf = d_buf;
			iocbq->wcqe_cmpl.word3++;
			/* We need to get the size out of the right CQE */
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			len = bf_get(lpfc_rcqe_length,
				     &hbq_buf->cq_event.cqe.rcqe_cmpl);
			iocbq->unsol_rcv_len = len;
			iocbq->wcqe_cmpl.total_data_placed += len;
			tot_len += len;
		} else {
			iocbq = lpfc_sli_get_iocbq(vport->phba);
			if (!iocbq) {
				if (first_iocbq) {
					bf_set(lpfc_wcqe_c_status,
					       &first_iocbq->wcqe_cmpl,
					       IOSTAT_SUCCESS);
					first_iocbq->wcqe_cmpl.parameter =
						IOERR_NO_RESOURCES;
				}
				lpfc_in_buf_free(vport->phba, d_buf);
				continue;
			}
			/* We need to get the size out of the right CQE */
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			len = bf_get(lpfc_rcqe_length,
				     &hbq_buf->cq_event.cqe.rcqe_cmpl);
			iocbq->cmd_dmabuf = d_buf;
			iocbq->bpl_dmabuf = NULL;
			iocbq->wcqe_cmpl.word3 = 1;

			if (len > LPFC_DATA_BUF_SIZE)
				iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
					LPFC_DATA_BUF_SIZE;
			else
				iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
					len;

			tot_len += len;
			iocbq->wcqe_cmpl.total_data_placed = tot_len;
			bf_set(wqe_els_did, &iocbq->wqe.xmit_els_rsp.wqe_dest,
			       sid);
			list_add_tail(&iocbq->list, &first_iocbq->list);
		}
	}
	/* Free the sequence's header buffer */
	if (!first_iocbq)
		lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);

	return first_iocbq;
}

static void
lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
			  struct hbq_dmabuf *seq_dmabuf)
{
	struct fc_frame_header *fc_hdr;
	struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
	struct lpfc_hba *phba = vport->phba;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
	if (!iocbq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2707 Ring %d handler: Failed to allocate "
				"iocb Rctl x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
		return;
	}
	if (!lpfc_complete_unsol_iocb(phba,
				      phba->sli4_hba.els_wq->pring,
				      iocbq, fc_hdr->fh_r_ctl,
				      fc_hdr->fh_type)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2540 Ring %d handler: unexpected Rctl "
				"x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
		lpfc_in_buf_free(phba, &seq_dmabuf->dbuf);
	}

	/* Free iocb created in lpfc_prep_seq */
	list_for_each_entry_safe(curr_iocb, next_iocb,
				 &iocbq->list, list) {
		list_del_init(&curr_iocb->list);
		lpfc_sli_release_iocbq(phba, curr_iocb);
	}
	lpfc_sli_release_iocbq(phba, iocbq);
}
19303
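/**
 * lpfc_sli4_mds_loopback_cmpl - Completion handler for an MDS loopback frame
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to the completed command iocbq.
 * @rspiocb: Pointer to the response iocbq.
 *
 * This routine frees the payload buffer and the command iocbq used to echo
 * an MDS diagnostic frame back onto the wire, then drains the txq.
 **/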
19304 static void
19305 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
19306 struct lpfc_iocbq *rspiocb)
19307 {
19308 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
19309
19310 if (pcmd && pcmd->virt)
19311 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
19312 kfree(pcmd);
19313 lpfc_sli_release_iocbq(phba, cmdiocb);
19314 lpfc_drain_txq(phba);
19315 }
19316
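/**
 * lpfc_sli4_handle_mds_loopback - Echo a received MDS diagnostic frame
 * @vport: Pointer to the vport the frame was received on.
 * @dmabuf: Pointer to the dmabuf that describes the received frame.
 *
 * This routine copies the received payload into a newly allocated buffer
 * and transmits it back on the wire with a SEND_FRAME WQE. If no iocb is
 * available, the cq event is queued for the worker thread to retry.
 **/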
19317 static void
19318 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
19319 struct hbq_dmabuf *dmabuf)
19320 {
19321 struct fc_frame_header *fc_hdr;
19322 struct lpfc_hba *phba = vport->phba;
19323 struct lpfc_iocbq *iocbq = NULL;
19324 union lpfc_wqe128 *pwqe;
19325 struct lpfc_dmabuf *pcmd = NULL;
19326 uint32_t frame_len;
19327 int rc;
19328 unsigned long iflags;
19329
19330 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19331 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
19332
19333 /* Send the received frame back */
19334 iocbq = lpfc_sli_get_iocbq(phba);
19335 if (!iocbq) {
19336 /* Queue cq event and wakeup worker thread to process it */
19337 spin_lock_irqsave(&phba->hbalock, iflags);
19338 list_add_tail(&dmabuf->cq_event.list,
19339 &phba->sli4_hba.sp_queue_event);
19340 phba->hba_flag |= HBA_SP_QUEUE_EVT;
19341 spin_unlock_irqrestore(&phba->hbalock, iflags);
19342 lpfc_worker_wake_up(phba);
19343 return;
19344 }
19345
19346 /* Allocate buffer for command payload */
19347 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
19348 if (pcmd)
19349 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
19350 &pcmd->phys);
19351 if (!pcmd || !pcmd->virt)
19352 goto exit;
19353
19354 INIT_LIST_HEAD(&pcmd->list);
19355
19356 	/* copy in the payload */
19357 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
19358
19359 iocbq->cmd_dmabuf = pcmd;
19360 iocbq->vport = vport;
19361 iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
19362 iocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
19363 iocbq->num_bdes = 0;
19364
19365 pwqe = &iocbq->wqe;
19366 /* fill in BDE's for command */
19367 pwqe->gen_req.bde.addrHigh = putPaddrHigh(pcmd->phys);
19368 pwqe->gen_req.bde.addrLow = putPaddrLow(pcmd->phys);
19369 pwqe->gen_req.bde.tus.f.bdeSize = frame_len;
19370 pwqe->gen_req.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
19371
19372 pwqe->send_frame.frame_len = frame_len;
19373 pwqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((__be32 *)fc_hdr));
19374 pwqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((__be32 *)fc_hdr + 1));
19375 pwqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((__be32 *)fc_hdr + 2));
19376 pwqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((__be32 *)fc_hdr + 3));
19377 pwqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((__be32 *)fc_hdr + 4));
19378 pwqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((__be32 *)fc_hdr + 5));
19379
19380 pwqe->generic.wqe_com.word7 = 0;
19381 pwqe->generic.wqe_com.word10 = 0;
19382
19383 bf_set(wqe_cmnd, &pwqe->generic.wqe_com, CMD_SEND_FRAME);
19384 bf_set(wqe_sof, &pwqe->generic.wqe_com, 0x2E); /* SOF byte */
19385 bf_set(wqe_eof, &pwqe->generic.wqe_com, 0x41); /* EOF byte */
19386 bf_set(wqe_lenloc, &pwqe->generic.wqe_com, 1);
19387 bf_set(wqe_xbl, &pwqe->generic.wqe_com, 1);
19388 bf_set(wqe_dbde, &pwqe->generic.wqe_com, 1);
19389 bf_set(wqe_xc, &pwqe->generic.wqe_com, 1);
19390 bf_set(wqe_cmd_type, &pwqe->generic.wqe_com, 0xA);
19391 bf_set(wqe_cqid, &pwqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
19392 bf_set(wqe_xri_tag, &pwqe->generic.wqe_com, iocbq->sli4_xritag);
19393 bf_set(wqe_reqtag, &pwqe->generic.wqe_com, iocbq->iotag);
19394 bf_set(wqe_class, &pwqe->generic.wqe_com, CLASS3);
19395 pwqe->generic.wqe_com.abort_tag = iocbq->iotag;
19396
19397 iocbq->cmd_cmpl = lpfc_sli4_mds_loopback_cmpl;
19398
19399 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
19400 if (rc == IOCB_ERROR)
19401 goto exit;
19402
19403 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19404 return;
19405
19406 exit:
19407 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
19408 "2023 Unable to process MDS loopback frame\n");
19409 if (pcmd && pcmd->virt)
19410 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
19411 kfree(pcmd);
19412 if (iocbq)
19413 lpfc_sli_release_iocbq(phba, iocbq);
19414 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19415 }
19416
19417 /**
19418 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
19419 * @phba: Pointer to HBA context object.
19420 * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
19421 *
19422  * This function is called with no lock held. It processes all the
19423  * received buffers and hands a sequence to the upper layer when a received
19424  * buffer indicates that it is the final frame in the sequence. The
19425  * interrupt service routine processes received buffers in interrupt
19426  * context. The worker thread calls lpfc_sli4_handle_received_buffer, which
19427  * calls the appropriate receive function when the final frame is received.
19428 **/
19429 void
19430 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
19431 struct hbq_dmabuf *dmabuf)
19432 {
19433 struct hbq_dmabuf *seq_dmabuf;
19434 struct fc_frame_header *fc_hdr;
19435 struct lpfc_vport *vport;
19436 uint32_t fcfi;
19437 uint32_t did;
19438
19439 /* Process each received buffer */
19440 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19441
19442 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
19443 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
19444 vport = phba->pport;
19445 /* Handle MDS Loopback frames */
19446 if (!(phba->pport->load_flag & FC_UNLOADING))
19447 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19448 else
19449 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19450 return;
19451 }
19452
19453 /* check to see if this a valid type of frame */
19454 if (lpfc_fc_frame_check(phba, fc_hdr)) {
19455 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19456 return;
19457 }
19458
19459 	if (bf_get(lpfc_cqe_code,
19460 		   &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)
19461 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
19462 &dmabuf->cq_event.cqe.rcqe_cmpl);
19463 else
19464 fcfi = bf_get(lpfc_rcqe_fcf_id,
19465 &dmabuf->cq_event.cqe.rcqe_cmpl);
19466
19467 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
19468 vport = phba->pport;
19469 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
19470 "2023 MDS Loopback %d bytes\n",
19471 bf_get(lpfc_rcqe_length,
19472 &dmabuf->cq_event.cqe.rcqe_cmpl));
19473 /* Handle MDS Loopback frames */
19474 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19475 return;
19476 }
19477
19478 /* d_id this frame is directed to */
19479 did = sli4_did_from_fc_hdr(fc_hdr);
19480
19481 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
19482 if (!vport) {
19483 /* throw out the frame */
19484 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19485 return;
19486 }
19487
19488 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
19489 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
19490 (did != Fabric_DID)) {
19491 /*
19492 * Throw out the frame if we are not pt2pt.
19493 * The pt2pt protocol allows for discovery frames
19494 * to be received without a registered VPI.
19495 */
19496 if (!(vport->fc_flag & FC_PT2PT) ||
19497 (phba->link_state == LPFC_HBA_READY)) {
19498 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19499 return;
19500 }
19501 }
19502
19503 /* Handle the basic abort sequence (BA_ABTS) event */
19504 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
19505 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
19506 return;
19507 }
19508
19509 /* Link this frame */
19510 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
19511 if (!seq_dmabuf) {
19512 /* unable to add frame to vport - throw it out */
19513 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19514 return;
19515 }
19516 	/* If this is not the last frame in the sequence, continue processing. */
19517 if (!lpfc_seq_complete(seq_dmabuf))
19518 return;
19519
19520 /* Send the complete sequence to the upper layer protocol */
19521 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
19522 }
19523
19524 /**
19525 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
19526 * @phba: pointer to lpfc hba data structure.
19527 *
19528 * This routine is invoked to post rpi header templates to the
19529 * HBA consistent with the SLI-4 interface spec. This routine
19530 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
19531  * SLI4_PAGE_SIZE / 64 rpi context headers.
19532 *
19533  * This routine does not require any locks. Its usage is expected
19534  * to be driver load or reset recovery, when driver execution is
19535  * sequential.
19536 *
19537 * Return codes
19538 * 0 - successful
19539 * -EIO - The mailbox failed to complete successfully.
19540 * When this error occurs, the driver is not guaranteed
19541 * to have any rpi regions posted to the device and
19542 * must either attempt to repost the regions or take a
19543 * fatal error.
19544 **/
19545 int
19546 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
19547 {
19548 struct lpfc_rpi_hdr *rpi_page;
19549 uint32_t rc = 0;
19550 uint16_t lrpi = 0;
19551
19552 /* SLI4 ports that support extents do not require RPI headers. */
19553 if (!phba->sli4_hba.rpi_hdrs_in_use)
19554 goto exit;
19555 if (phba->sli4_hba.extents_in_use)
19556 return -EIO;
19557
19558 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
19559 /*
19560 * Assign the rpi headers a physical rpi only if the driver
19561 * has not initialized those resources. A port reset only
19562 * needs the headers posted.
19563 */
19564 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
19565 LPFC_RPI_RSRC_RDY)
19566 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19567
19568 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
19569 if (rc != MBX_SUCCESS) {
19570 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19571 "2008 Error %d posting all rpi "
19572 "headers\n", rc);
19573 rc = -EIO;
19574 break;
19575 }
19576 }
19577
19578 exit:
19579 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
19580 LPFC_RPI_RSRC_RDY);
19581 return rc;
19582 }
19583
19584 /**
19585 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
19586 * @phba: pointer to lpfc hba data structure.
19587 * @rpi_page: pointer to the rpi memory region.
19588 *
19589 * This routine is invoked to post a single rpi header to the
19590 * HBA consistent with the SLI-4 interface spec. This memory region
19591 * maps up to 64 rpi context regions.
19592 *
19593 * Return codes
19594 * 0 - successful
19595 * -ENOMEM - No available memory
19596 * -EIO - The mailbox failed to complete successfully.
19597 **/
19598 int
19599 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
19600 {
19601 LPFC_MBOXQ_t *mboxq;
19602 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
19603 uint32_t rc = 0;
19604 uint32_t shdr_status, shdr_add_status;
19605 union lpfc_sli4_cfg_shdr *shdr;
19606
19607 /* SLI4 ports that support extents do not require RPI headers. */
19608 if (!phba->sli4_hba.rpi_hdrs_in_use)
19609 return rc;
19610 if (phba->sli4_hba.extents_in_use)
19611 return -EIO;
19612
19613 /* The port is notified of the header region via a mailbox command. */
19614 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19615 if (!mboxq) {
19616 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19617 "2001 Unable to allocate memory for issuing "
19618 "SLI_CONFIG_SPECIAL mailbox command\n");
19619 return -ENOMEM;
19620 }
19621
19622 /* Post all rpi memory regions to the port. */
19623 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
19624 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19625 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
19626 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
19627 sizeof(struct lpfc_sli4_cfg_mhdr),
19628 LPFC_SLI4_MBX_EMBED);
19629
19630
19631 /* Post the physical rpi to the port for this rpi header. */
19632 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
19633 rpi_page->start_rpi);
19634 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
19635 hdr_tmpl, rpi_page->page_count);
19636
19637 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
19638 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
19639 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19640 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
19641 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19642 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19643 mempool_free(mboxq, phba->mbox_mem_pool);
19644 if (shdr_status || shdr_add_status || rc) {
19645 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19646 "2514 POST_RPI_HDR mailbox failed with "
19647 "status x%x add_status x%x, mbx status x%x\n",
19648 shdr_status, shdr_add_status, rc);
19649 rc = -ENXIO;
19650 } else {
19651 /*
19652 		 * The next_rpi stores the next logical modulo-64 rpi value used
19653 * to post physical rpis in subsequent rpi postings.
19654 */
19655 spin_lock_irq(&phba->hbalock);
19656 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
19657 spin_unlock_irq(&phba->hbalock);
19658 }
19659 return rc;
19660 }
19661
19662 /**
19663 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
19664 * @phba: pointer to lpfc hba data structure.
19665 *
19666  * This routine is invoked to allocate the next available rpi from the
19667  * driver-maintained rpi bitmask, consistent with the SLI-4 interface
19668  * spec. When the pool of posted rpis runs low, it also attempts to post
19669  * an additional rpi header region to grow the pool.
19670 *
19671 * Returns
19672 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
19673 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
19674 **/
19675 int
19676 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
19677 {
19678 unsigned long rpi;
19679 uint16_t max_rpi, rpi_limit;
19680 uint16_t rpi_remaining, lrpi = 0;
19681 struct lpfc_rpi_hdr *rpi_hdr;
19682 unsigned long iflag;
19683
19684 /*
19685 * Fetch the next logical rpi. Because this index is logical,
19686 * the driver starts at 0 each time.
19687 */
19688 spin_lock_irqsave(&phba->hbalock, iflag);
19689 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
19690 rpi_limit = phba->sli4_hba.next_rpi;
19691
19692 rpi = find_first_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit);
19693 if (rpi >= rpi_limit)
19694 rpi = LPFC_RPI_ALLOC_ERROR;
19695 else {
19696 set_bit(rpi, phba->sli4_hba.rpi_bmask);
19697 phba->sli4_hba.max_cfg_param.rpi_used++;
19698 phba->sli4_hba.rpi_count++;
19699 }
19700 lpfc_printf_log(phba, KERN_INFO,
19701 LOG_NODE | LOG_DISCOVERY,
19702 "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
19703 (int) rpi, max_rpi, rpi_limit);
19704
19705 /*
19706 * Don't try to allocate more rpi header regions if the device limit
19707 * has been exhausted.
19708 */
19709 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
19710 (phba->sli4_hba.rpi_count >= max_rpi)) {
19711 spin_unlock_irqrestore(&phba->hbalock, iflag);
19712 return rpi;
19713 }
19714
19715 /*
19716 * RPI header postings are not required for SLI4 ports capable of
19717 * extents.
19718 */
19719 if (!phba->sli4_hba.rpi_hdrs_in_use) {
19720 spin_unlock_irqrestore(&phba->hbalock, iflag);
19721 return rpi;
19722 }
19723
19724 /*
19725 * If the driver is running low on rpi resources, allocate another
19726 * page now. Note that the next_rpi value is used because
19727 	 * it represents how many rpis are actually provisioned, whereas
19728 	 * max_rpi is the maximum number the device supports.
19729 */
19730 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
19731 spin_unlock_irqrestore(&phba->hbalock, iflag);
19732 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
19733 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
19734 if (!rpi_hdr) {
19735 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19736 "2002 Error Could not grow rpi "
19737 "count\n");
19738 } else {
19739 lrpi = rpi_hdr->start_rpi;
19740 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19741 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
19742 }
19743 }
19744
19745 return rpi;
19746 }
19747
19748 /**
19749 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
19750 * @phba: pointer to lpfc hba data structure.
19751 * @rpi: rpi to free
19752 *
19753 * This routine is invoked to release an rpi to the pool of
19754 * available rpis maintained by the driver.
19755 **/
19756 static void
19757 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19758 {
19759 /*
19760 * if the rpi value indicates a prior unreg has already
19761 * been done, skip the unreg.
19762 */
19763 if (rpi == LPFC_RPI_ALLOC_ERROR)
19764 return;
19765
19766 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
19767 phba->sli4_hba.rpi_count--;
19768 phba->sli4_hba.max_cfg_param.rpi_used--;
19769 } else {
19770 lpfc_printf_log(phba, KERN_INFO,
19771 LOG_NODE | LOG_DISCOVERY,
19772 "2016 rpi %x not inuse\n",
19773 rpi);
19774 }
19775 }
19776
19777 /**
19778 * lpfc_sli4_free_rpi - Release an rpi for reuse.
19779 * @phba: pointer to lpfc hba data structure.
19780 * @rpi: rpi to free
19781 *
19782 * This routine is invoked to release an rpi to the pool of
19783 * available rpis maintained by the driver.
19784 **/
19785 void
19786 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19787 {
19788 spin_lock_irq(&phba->hbalock);
19789 __lpfc_sli4_free_rpi(phba, rpi);
19790 spin_unlock_irq(&phba->hbalock);
19791 }
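
/*
 * Illustrative sketch only, not driver code: the expected pairing of
 * lpfc_sli4_alloc_rpi() and lpfc_sli4_free_rpi() by a caller that needs
 * an rpi for a remote port login. The error mapping is hypothetical.
 *
 *	int rpi;
 *
 *	rpi = lpfc_sli4_alloc_rpi(phba);
 *	if (rpi == LPFC_RPI_ALLOC_ERROR)
 *		return -ENOSPC;
 *	... use the rpi to register the login with the port ...
 *	lpfc_sli4_free_rpi(phba, rpi);
 */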
19792
19793 /**
19794 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
19795 * @phba: pointer to lpfc hba data structure.
19796 *
19797  * This routine is invoked to free the memory regions that provide
19798  * rpi bookkeeping: the rpi bitmask and the rpi id array.
19799 **/
19800 void
19801 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
19802 {
19803 kfree(phba->sli4_hba.rpi_bmask);
19804 kfree(phba->sli4_hba.rpi_ids);
19805 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
19806 }
19807
19808 /**
19809  * lpfc_sli4_resume_rpi - Resume an rpi with the port
19810 * @ndlp: pointer to lpfc nodelist data structure.
19811 * @cmpl: completion call-back.
19812 * @arg: data to load as MBox 'caller buffer information'
19813 *
19814  * This routine is invoked to issue a RESUME_RPI mailbox command to the
19815  * port for the rpi associated with @ndlp.
19816 **/
19817 int
19818 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
19819 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
19820 {
19821 LPFC_MBOXQ_t *mboxq;
19822 struct lpfc_hba *phba = ndlp->phba;
19823 int rc;
19824
19825 	/* The rpi is resumed via a RESUME_RPI mailbox command. */
19826 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19827 if (!mboxq)
19828 return -ENOMEM;
19829
19830 /* If cmpl assigned, then this nlp_get pairs with
19831 * lpfc_mbx_cmpl_resume_rpi.
19832 *
19833 * Else cmpl is NULL, then this nlp_get pairs with
19834 * lpfc_sli_def_mbox_cmpl.
19835 */
19836 if (!lpfc_nlp_get(ndlp)) {
19837 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19838 "2122 %s: Failed to get nlp ref\n",
19839 __func__);
19840 mempool_free(mboxq, phba->mbox_mem_pool);
19841 return -EIO;
19842 }
19843
19844 	/* Construct the RESUME_RPI mailbox command for this rpi. */
19845 lpfc_resume_rpi(mboxq, ndlp);
19846 if (cmpl) {
19847 mboxq->mbox_cmpl = cmpl;
19848 mboxq->ctx_buf = arg;
19849 } else
19850 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19851 mboxq->ctx_ndlp = ndlp;
19852 mboxq->vport = ndlp->vport;
19853 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19854 if (rc == MBX_NOT_FINISHED) {
19855 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19856 "2010 Resume RPI Mailbox failed "
19857 "status %d, mbxStatus x%x\n", rc,
19858 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19859 lpfc_nlp_put(ndlp);
19860 mempool_free(mboxq, phba->mbox_mem_pool);
19861 return -EIO;
19862 }
19863 return 0;
19864 }
19865
19866 /**
19867 * lpfc_sli4_init_vpi - Initialize a vpi with the port
19868 * @vport: Pointer to the vport for which the vpi is being initialized
19869 *
19870 * This routine is invoked to activate a vpi with the port.
19871 *
19872 * Returns:
19873 * 0 success
19874 * -Evalue otherwise
19875 **/
19876 int
19877 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
19878 {
19879 LPFC_MBOXQ_t *mboxq;
19880 int rc = 0;
19881 int retval = MBX_SUCCESS;
19882 uint32_t mbox_tmo;
19883 struct lpfc_hba *phba = vport->phba;
19884 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19885 if (!mboxq)
19886 return -ENOMEM;
19887 lpfc_init_vpi(phba, mboxq, vport->vpi);
19888 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
19889 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
19890 if (rc != MBX_SUCCESS) {
19891 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
19892 "2022 INIT VPI Mailbox failed "
19893 "status %d, mbxStatus x%x\n", rc,
19894 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19895 retval = -EIO;
19896 }
19897 if (rc != MBX_TIMEOUT)
19898 mempool_free(mboxq, vport->phba->mbox_mem_pool);
19899
19900 return retval;
19901 }
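
/*
 * Illustrative sketch only, not driver code: a vport bring-up path would
 * initialize its vpi before starting discovery; the error handling shown
 * is hypothetical.
 *
 *	if (lpfc_sli4_init_vpi(vport))
 *		... fail the vport bring-up and clean up ...
 */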
19902
19903 /**
19904 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
19905 * @phba: pointer to lpfc hba data structure.
19906 * @mboxq: Pointer to mailbox object.
19907 *
19908  * This routine is the completion handler for the nonembedded ADD_FCF_RECORD
19909  * mailbox command. It checks the IOCTL status embedded in the mailbox
19910  * subheader, logs any failure, and frees the mailbox resources.
19911 **/
19912 static void
19913 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
19914 {
19915 void *virt_addr;
19916 union lpfc_sli4_cfg_shdr *shdr;
19917 uint32_t shdr_status, shdr_add_status;
19918
19919 virt_addr = mboxq->sge_array->addr[0];
19920 /* The IOCTL status is embedded in the mailbox subheader. */
19921 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
19922 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19923 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19924
19925 if ((shdr_status || shdr_add_status) &&
19926 (shdr_status != STATUS_FCF_IN_USE))
19927 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19928 "2558 ADD_FCF_RECORD mailbox failed with "
19929 "status x%x add_status x%x\n",
19930 shdr_status, shdr_add_status);
19931
19932 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19933 }
19934
19935 /**
19936 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
19937 * @phba: pointer to lpfc hba data structure.
19938 * @fcf_record: pointer to the initialized fcf record to add.
19939 *
19940 * This routine is invoked to manually add a single FCF record. The caller
19941 * must pass a completely initialized FCF_Record. This routine takes
19942 * care of the nonembedded mailbox operations.
19943 **/
19944 int
19945 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
19946 {
19947 int rc = 0;
19948 LPFC_MBOXQ_t *mboxq;
19949 uint8_t *bytep;
19950 void *virt_addr;
19951 struct lpfc_mbx_sge sge;
19952 uint32_t alloc_len, req_len;
19953 uint32_t fcfindex;
19954
19955 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19956 if (!mboxq) {
19957 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19958 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
19959 return -ENOMEM;
19960 }
19961
19962 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
19963 sizeof(uint32_t);
19964
19965 /* Allocate DMA memory and set up the non-embedded mailbox command */
19966 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19967 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
19968 req_len, LPFC_SLI4_MBX_NEMBED);
19969 if (alloc_len < req_len) {
19970 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19971 "2523 Allocated DMA memory size (x%x) is "
19972 "less than the requested DMA memory "
19973 "size (x%x)\n", alloc_len, req_len);
19974 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19975 return -ENOMEM;
19976 }
19977
19978 /*
19979 * Get the first SGE entry from the non-embedded DMA memory. This
19980 * routine only uses a single SGE.
19981 */
19982 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
19983 virt_addr = mboxq->sge_array->addr[0];
19984 /*
19985 * Configure the FCF record for FCFI 0. This is the driver's
19986 * hardcoded default and gets used in nonFIP mode.
19987 */
19988 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
19989 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
19990 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
19991
19992 /*
19993 * Copy the fcf_index and the FCF Record Data. The data starts after
19994 * the FCoE header plus word10. The data copy needs to be endian
19995 * correct.
19996 */
19997 bytep += sizeof(uint32_t);
19998 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
19999 mboxq->vport = phba->pport;
20000 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
20001 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20002 if (rc == MBX_NOT_FINISHED) {
20003 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20004 "2515 ADD_FCF_RECORD mailbox failed with "
20005 "status 0x%x\n", rc);
20006 lpfc_sli4_mbox_cmd_free(phba, mboxq);
20007 rc = -EIO;
20008 } else
20009 rc = 0;
20010
20011 return rc;
20012 }
20013
20014 /**
20015 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
20016 * @phba: pointer to lpfc hba data structure.
20017 * @fcf_record: pointer to the fcf record to write the default data.
20018 * @fcf_index: FCF table entry index.
20019 *
20020 * This routine is invoked to build the driver's default FCF record. The
20021 * values used are hardcoded. This routine handles memory initialization.
20022 *
20023 **/
20024 void
20025 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
20026 struct fcf_record *fcf_record,
20027 uint16_t fcf_index)
20028 {
20029 memset(fcf_record, 0, sizeof(struct fcf_record));
20030 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
20031 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
20032 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
20033 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
20034 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
20035 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
20036 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
20037 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
20038 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
20039 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
20040 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
20041 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
20042 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
20043 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
20044 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
20045 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
20046 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
20047 /* Set the VLAN bit map */
20048 if (phba->valid_vlan) {
20049 fcf_record->vlan_bitmap[phba->vlan_id / 8]
20050 = 1 << (phba->vlan_id % 8);
20051 }
20052 }
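
/*
 * Illustrative sketch only, not driver code: building the driver default
 * FCF record and handing it to the port. Index 0 is used here because the
 * default record is configured for FCFI 0 in non-FIP mode (see
 * lpfc_sli4_add_fcf_record() above); the error handling is hypothetical.
 *
 *	struct fcf_record fcf_record;
 *
 *	lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record, 0);
 *	if (lpfc_sli4_add_fcf_record(phba, &fcf_record))
 *		... FCF setup failed, fall back or abort discovery ...
 */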
20053
20054 /**
20055 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
20056 * @phba: pointer to lpfc hba data structure.
20057 * @fcf_index: FCF table entry offset.
20058 *
20059 * This routine is invoked to scan the entire FCF table by reading FCF
20060 * record and processing it one at a time starting from the @fcf_index
20061 * for initial FCF discovery or fast FCF failover rediscovery.
20062 *
20063  * Return 0 if the mailbox command is submitted successfully, nonzero
20064 * otherwise.
20065 **/
20066 int
20067 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
20068 {
20069 int rc = 0, error;
20070 LPFC_MBOXQ_t *mboxq;
20071
20072 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
20073 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
20074 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20075 if (!mboxq) {
20076 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20077 "2000 Failed to allocate mbox for "
20078 "READ_FCF cmd\n");
20079 error = -ENOMEM;
20080 goto fail_fcf_scan;
20081 }
20082 /* Construct the read FCF record mailbox command */
20083 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
20084 if (rc) {
20085 error = -EINVAL;
20086 goto fail_fcf_scan;
20087 }
20088 /* Issue the mailbox command asynchronously */
20089 mboxq->vport = phba->pport;
20090 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
20091
20092 spin_lock_irq(&phba->hbalock);
20093 phba->hba_flag |= FCF_TS_INPROG;
20094 spin_unlock_irq(&phba->hbalock);
20095
20096 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20097 if (rc == MBX_NOT_FINISHED)
20098 error = -EIO;
20099 else {
20100 /* Reset eligible FCF count for new scan */
20101 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
20102 phba->fcf.eligible_fcf_cnt = 0;
20103 error = 0;
20104 }
20105 fail_fcf_scan:
20106 if (error) {
20107 if (mboxq)
20108 lpfc_sli4_mbox_cmd_free(phba, mboxq);
20109 /* FCF scan failed, clear FCF_TS_INPROG flag */
20110 spin_lock_irq(&phba->hbalock);
20111 phba->hba_flag &= ~FCF_TS_INPROG;
20112 spin_unlock_irq(&phba->hbalock);
20113 }
20114 return error;
20115 }
20116
20117 /**
20118 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
20119 * @phba: pointer to lpfc hba data structure.
20120 * @fcf_index: FCF table entry offset.
20121 *
20122 * This routine is invoked to read an FCF record indicated by @fcf_index
20123 * and to use it for FLOGI roundrobin FCF failover.
20124 *
20125  * Return 0 if the mailbox command is submitted successfully, nonzero
20126 * otherwise.
20127 **/
20128 int
20129 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
20130 {
20131 int rc = 0, error;
20132 LPFC_MBOXQ_t *mboxq;
20133
20134 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20135 if (!mboxq) {
20136 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
20137 "2763 Failed to allocate mbox for "
20138 "READ_FCF cmd\n");
20139 error = -ENOMEM;
20140 goto fail_fcf_read;
20141 }
20142 /* Construct the read FCF record mailbox command */
20143 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
20144 if (rc) {
20145 error = -EINVAL;
20146 goto fail_fcf_read;
20147 }
20148 /* Issue the mailbox command asynchronously */
20149 mboxq->vport = phba->pport;
20150 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
20151 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20152 if (rc == MBX_NOT_FINISHED)
20153 error = -EIO;
20154 else
20155 error = 0;
20156
20157 fail_fcf_read:
20158 if (error && mboxq)
20159 lpfc_sli4_mbox_cmd_free(phba, mboxq);
20160 return error;
20161 }
20162
20163 /**
20164 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
20165 * @phba: pointer to lpfc hba data structure.
20166 * @fcf_index: FCF table entry offset.
20167 *
20168 * This routine is invoked to read an FCF record indicated by @fcf_index to
20169 * determine whether it's eligible for FLOGI roundrobin failover list.
20170 *
20171  * Return 0 if the mailbox command is submitted successfully, nonzero
20172 * otherwise.
20173 **/
20174 int
20175 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
20176 {
20177 int rc = 0, error;
20178 LPFC_MBOXQ_t *mboxq;
20179
20180 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20181 if (!mboxq) {
20182 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
20183 "2758 Failed to allocate mbox for "
20184 "READ_FCF cmd\n");
20185 error = -ENOMEM;
20186 goto fail_fcf_read;
20187 }
20188 /* Construct the read FCF record mailbox command */
20189 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
20190 if (rc) {
20191 error = -EINVAL;
20192 goto fail_fcf_read;
20193 }
20194 /* Issue the mailbox command asynchronously */
20195 mboxq->vport = phba->pport;
20196 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
20197 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20198 if (rc == MBX_NOT_FINISHED)
20199 error = -EIO;
20200 else
20201 error = 0;
20202
20203 fail_fcf_read:
20204 if (error && mboxq)
20205 lpfc_sli4_mbox_cmd_free(phba, mboxq);
20206 return error;
20207 }
20208
20209 /**
20210  * lpfc_check_next_fcf_pri_level - Populate rr_bmask at next priority level
20211  * @phba: pointer to the lpfc_hba struct for this port.
20212  *
20213  * This routine is called from the lpfc_sli4_fcf_rr_next_index_get routine
20214  * when the rr_bmask is empty. The FCF indices are put into the rr_bmask
20215  * based on their priority level, starting from the highest priority down
20216  * to the lowest; the most likely FCF candidate will be in the highest
20217  * priority group. When called, this routine searches the fcf_pri list
20218  * for the next lowest priority group and repopulates the rr_bmask.
20219 * returns:
20220 * 1=success 0=failure
20221 **/
20222 static int
20223 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
20224 {
20225 uint16_t next_fcf_pri;
20226 uint16_t last_index;
20227 struct lpfc_fcf_pri *fcf_pri;
20228 int rc;
20229 int ret = 0;
20230
20231 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
20232 LPFC_SLI4_FCF_TBL_INDX_MAX);
20233 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20234 "3060 Last IDX %d\n", last_index);
20235
20236 /* Verify the priority list has 2 or more entries */
20237 spin_lock_irq(&phba->hbalock);
20238 if (list_empty(&phba->fcf.fcf_pri_list) ||
20239 list_is_singular(&phba->fcf.fcf_pri_list)) {
20240 spin_unlock_irq(&phba->hbalock);
20241 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20242 "3061 Last IDX %d\n", last_index);
20243 return 0; /* Empty rr list */
20244 }
20245 spin_unlock_irq(&phba->hbalock);
20246
20247 next_fcf_pri = 0;
20248 /*
20249 * Clear the rr_bmask and set all of the bits that are at this
20250 * priority.
20251 */
20252 memset(phba->fcf.fcf_rr_bmask, 0,
20253 sizeof(*phba->fcf.fcf_rr_bmask));
20254 spin_lock_irq(&phba->hbalock);
20255 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
20256 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
20257 continue;
20258 /*
20259 		 * The first priority that has not failed FLOGI
20260 		 * will be the highest.
20261 */
20262 if (!next_fcf_pri)
20263 next_fcf_pri = fcf_pri->fcf_rec.priority;
20264 spin_unlock_irq(&phba->hbalock);
20265 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
20266 rc = lpfc_sli4_fcf_rr_index_set(phba,
20267 fcf_pri->fcf_rec.fcf_index);
20268 if (rc)
20269 return 0;
20270 }
20271 spin_lock_irq(&phba->hbalock);
20272 }
20273 /*
20274 	 * If next_fcf_pri was not set above and the list is not empty, then
20275 	 * we have failed FLOGI on all of them. So reset the FLOGI failed flag
20276 	 * and start at the beginning.
20277 */
20278 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
20279 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
20280 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
20281 /*
20282 			 * The first priority that has not failed FLOGI
20283 			 * will be the highest.
20284 */
20285 if (!next_fcf_pri)
20286 next_fcf_pri = fcf_pri->fcf_rec.priority;
20287 spin_unlock_irq(&phba->hbalock);
20288 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
20289 rc = lpfc_sli4_fcf_rr_index_set(phba,
20290 fcf_pri->fcf_rec.fcf_index);
20291 if (rc)
20292 return 0;
20293 }
20294 spin_lock_irq(&phba->hbalock);
20295 }
20296 } else
20297 ret = 1;
20298 spin_unlock_irq(&phba->hbalock);
20299
20300 return ret;
20301 }
20302 /**
20303 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
20304 * @phba: pointer to lpfc hba data structure.
20305 *
20306 * This routine is to get the next eligible FCF record index in a round
20307 * robin fashion. If the next eligible FCF record index equals to the
20308 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
20309 * shall be returned, otherwise, the next eligible FCF record's index
20310 * shall be returned.
20311 **/
20312 uint16_t
20313 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
20314 {
20315 uint16_t next_fcf_index;
20316
20317 initial_priority:
20318 /* Search start from next bit of currently registered FCF index */
20319 next_fcf_index = phba->fcf.current_rec.fcf_indx;
20320
20321 next_priority:
20322 /* Determine the next fcf index to check */
20323 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
20324 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
20325 LPFC_SLI4_FCF_TBL_INDX_MAX,
20326 next_fcf_index);
20327
20328 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
20329 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20330 /*
20331 * If we have wrapped then we need to clear the bits that
20332 * have been tested so that we can detect when we should
20333 * change the priority level.
20334 */
20335 next_fcf_index = find_first_bit(phba->fcf.fcf_rr_bmask,
20336 LPFC_SLI4_FCF_TBL_INDX_MAX);
20337 }
20338
20339
20340 /* Check roundrobin failover list empty condition */
20341 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
20342 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
20343 /*
20344 		 * If the next fcf index is not found, check if there are lower
20345 		 * priority level fcfs in the fcf priority list.
20346 		 * Set up the rr_bmask with all of the available fcf bits
20347 		 * at that level and continue the selection process.
20348 */
20349 if (lpfc_check_next_fcf_pri_level(phba))
20350 goto initial_priority;
20351 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
20352 "2844 No roundrobin failover FCF available\n");
20353
20354 return LPFC_FCOE_FCF_NEXT_NONE;
20355 }
20356
20357 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
20358 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
20359 LPFC_FCF_FLOGI_FAILED) {
20360 if (list_is_singular(&phba->fcf.fcf_pri_list))
20361 return LPFC_FCOE_FCF_NEXT_NONE;
20362
20363 goto next_priority;
20364 }
20365
20366 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20367 "2845 Get next roundrobin failover FCF (x%x)\n",
20368 next_fcf_index);
20369
20370 return next_fcf_index;
20371 }
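
/*
 * Illustrative sketch only, not driver code: consuming the roundrobin
 * failover list after a failed FLOGI. The retry wiring is hypothetical.
 *
 *	uint16_t fcf_index;
 *
 *	fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
 *	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE)
 *		... no eligible FCF remains; rediscover or give up ...
 *	else
 *		... read the FCF record at fcf_index and retry FLOGI ...
 */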
20372
20373 /**
20374 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
20375 * @phba: pointer to lpfc hba data structure.
20376 * @fcf_index: index into the FCF table to 'set'
20377 *
20378 * This routine sets the FCF record index in to the eligible bmask for
20379 * roundrobin failover search. It checks to make sure that the index
20380 * does not go beyond the range of the driver allocated bmask dimension
20381 * before setting the bit.
20382 *
20383  * Returns 0 if the index bit is successfully set, otherwise, it returns
20384 * -EINVAL.
20385 **/
20386 int
20387 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
20388 {
20389 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20390 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20391 "2610 FCF (x%x) reached driver's book "
20392 "keeping dimension:x%x\n",
20393 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20394 return -EINVAL;
20395 }
20396 /* Set the eligible FCF record index bmask */
20397 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20398
20399 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20400 "2790 Set FCF (x%x) to roundrobin FCF failover "
20401 "bmask\n", fcf_index);
20402
20403 return 0;
20404 }
20405
20406 /**
20407 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
20408 * @phba: pointer to lpfc hba data structure.
20409 * @fcf_index: index into the FCF table to 'clear'
20410 *
20411 * This routine clears the FCF record index from the eligible bmask for
20412 * roundrobin failover search. It checks to make sure that the index
20413 * does not go beyond the range of the driver allocated bmask dimension
20414 * before clearing the bit.
20415 **/
20416 void
20417 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
20418 {
20419 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
20420 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20421 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20422 "2762 FCF (x%x) reached driver's book "
20423 "keeping dimension:x%x\n",
20424 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20425 return;
20426 }
20427 /* Clear the eligible FCF record index bmask */
20428 spin_lock_irq(&phba->hbalock);
20429 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
20430 list) {
20431 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
20432 list_del_init(&fcf_pri->list);
20433 break;
20434 }
20435 }
20436 spin_unlock_irq(&phba->hbalock);
20437 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20438
20439 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20440 "2791 Clear FCF (x%x) from roundrobin failover "
20441 "bmask\n", fcf_index);
20442 }
20443
20444 /**
20445 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
20446 * @phba: pointer to lpfc hba data structure.
20447 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
20448 *
20449 * This routine is the completion routine for the rediscover FCF table mailbox
20450 * command. If the mailbox command returned failure, it will try to stop the
20451 * FCF rediscover wait timer.
20452 **/
20453 static void
20454 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
20455 {
20456 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20457 uint32_t shdr_status, shdr_add_status;
20458
20459 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20460
20461 shdr_status = bf_get(lpfc_mbox_hdr_status,
20462 &redisc_fcf->header.cfg_shdr.response);
20463 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20464 &redisc_fcf->header.cfg_shdr.response);
20465 if (shdr_status || shdr_add_status) {
20466 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20467 "2746 Requesting for FCF rediscovery failed "
20468 "status x%x add_status x%x\n",
20469 shdr_status, shdr_add_status);
20470 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
20471 spin_lock_irq(&phba->hbalock);
20472 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
20473 spin_unlock_irq(&phba->hbalock);
20474 /*
20475 			 * The CVL event triggered FCF rediscover request failed;
20476 			 * as a last resort, retry the currently registered FCF entry.
20477 */
20478 lpfc_retry_pport_discovery(phba);
20479 } else {
20480 spin_lock_irq(&phba->hbalock);
20481 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
20482 spin_unlock_irq(&phba->hbalock);
20483 /*
20484 			 * The DEAD FCF event triggered FCF rediscover request
20485 			 * failed; as a last resort, fail over as a link down
20486 			 * to FCF registration.
20487 */
20488 lpfc_sli4_fcf_dead_failthrough(phba);
20489 }
20490 } else {
20491 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20492 "2775 Start FCF rediscover quiescent timer\n");
20493 /*
20494 		 * Start the FCF rediscovery wait timer for the pending FCF
20495 		 * before rescanning the FCF record table.
20496 */
20497 lpfc_fcf_redisc_wait_start_timer(phba);
20498 }
20499
20500 mempool_free(mbox, phba->mbox_mem_pool);
20501 }
20502
20503 /**
20504 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
20505 * @phba: pointer to lpfc hba data structure.
20506 *
20507  * This routine is invoked to request rediscovery of the entire FCF table
20508 * by the port.
20509 **/
20510 int
20511 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
20512 {
20513 LPFC_MBOXQ_t *mbox;
20514 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20515 int rc, length;
20516
20517 /* Cancel retry delay timers to all vports before FCF rediscover */
20518 lpfc_cancel_all_vport_retry_delay_timer(phba);
20519
20520 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20521 if (!mbox) {
20522 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20523 "2745 Failed to allocate mbox for "
20524 "requesting FCF rediscover.\n");
20525 return -ENOMEM;
20526 }
20527
20528 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
20529 sizeof(struct lpfc_sli4_cfg_mhdr));
20530 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
20531 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
20532 length, LPFC_SLI4_MBX_EMBED);
20533
20534 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20535 /* Set count to 0 for invalidating the entire FCF database */
20536 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
20537
20538 /* Issue the mailbox command asynchronously */
20539 mbox->vport = phba->pport;
20540 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
20541 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
20542
20543 if (rc == MBX_NOT_FINISHED) {
20544 mempool_free(mbox, phba->mbox_mem_pool);
20545 return -EIO;
20546 }
20547 return 0;
20548 }
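
/*
 * Illustrative sketch only, not driver code: an event handler (e.g. for a
 * CVL or FCF DEAD async event) would request a full table rediscovery and
 * fall back if the request cannot be issued; the fallback is hypothetical.
 *
 *	if (lpfc_sli4_redisc_fcf_table(phba))
 *		... fall back, e.g. lpfc_sli4_fcf_dead_failthrough(phba) ...
 */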
20549
20550 /**
20551 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
20552 * @phba: pointer to lpfc hba data structure.
20553 *
20554  * This function is the failover routine of last resort for the FCF DEAD
20555  * event, used when the driver has failed to perform fast FCF failover.
20556 **/
20557 void
20558 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
20559 {
20560 uint32_t link_state;
20561
20562 /*
20563 * Last resort as FCF DEAD event failover will treat this as
20564 * a link down, but save the link state because we don't want
20565 * it to be changed to Link Down unless it is already down.
20566 */
20567 link_state = phba->link_state;
20568 lpfc_linkdown(phba);
20569 phba->link_state = link_state;
20570
20571 /* Unregister FCF if no devices connected to it */
20572 lpfc_unregister_unused_fcf(phba);
20573 }
20574
20575 /**
20576 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
20577 * @phba: pointer to lpfc hba data structure.
20578 * @rgn23_data: pointer to configure region 23 data.
20579 *
20580 * This function gets SLI3 port configure region 23 data through memory dump
20581 * mailbox command. When it successfully retrieves data, the size of the data
20582 * will be returned, otherwise, 0 will be returned.
20583 **/
20584 static uint32_t
20585 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20586 {
20587 LPFC_MBOXQ_t *pmb = NULL;
20588 MAILBOX_t *mb;
20589 uint32_t offset = 0;
20590 int rc;
20591
20592 if (!rgn23_data)
20593 return 0;
20594
20595 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20596 if (!pmb) {
20597 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20598 "2600 failed to allocate mailbox memory\n");
20599 return 0;
20600 }
20601 mb = &pmb->u.mb;
20602
20603 do {
20604 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
20605 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
20606
20607 if (rc != MBX_SUCCESS) {
20608 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
20609 "2601 failed to read config "
20610 "region 23, rc 0x%x Status 0x%x\n",
20611 rc, mb->mbxStatus);
20612 mb->un.varDmp.word_cnt = 0;
20613 }
20614 /*
20615 		 * dump mem may return a zero count when finished, or we got a
20616 		 * mailbox error; either way we are done.
20617 */
20618 if (mb->un.varDmp.word_cnt == 0)
20619 break;
20620
20621 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
20622 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
20623
20624 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
20625 rgn23_data + offset,
20626 mb->un.varDmp.word_cnt);
20627 offset += mb->un.varDmp.word_cnt;
20628 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
20629
20630 mempool_free(pmb, phba->mbox_mem_pool);
20631 return offset;
20632 }
20633
20634 /**
20635 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
20636 * @phba: pointer to lpfc hba data structure.
20637 * @rgn23_data: pointer to configure region 23 data.
20638 *
20639 * This function gets SLI4 port configure region 23 data through memory dump
20640 * mailbox command. When it successfully retrieves data, the size of the data
20641 * will be returned, otherwise, 0 will be returned.
20642 **/
20643 static uint32_t
20644 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20645 {
20646 LPFC_MBOXQ_t *mboxq = NULL;
20647 struct lpfc_dmabuf *mp = NULL;
20648 struct lpfc_mqe *mqe;
20649 uint32_t data_length = 0;
20650 int rc;
20651
20652 if (!rgn23_data)
20653 return 0;
20654
20655 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20656 if (!mboxq) {
20657 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20658 "3105 failed to allocate mailbox memory\n");
20659 return 0;
20660 }
20661
20662 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
20663 goto out;
20664 mqe = &mboxq->u.mqe;
20665 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
20666 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
20667 if (rc)
20668 goto out;
20669 data_length = mqe->un.mb_words[5];
20670 if (data_length == 0)
20671 goto out;
20672 if (data_length > DMP_RGN23_SIZE) {
20673 data_length = 0;
20674 goto out;
20675 }
20676 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
20677 out:
20678 lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
20679 return data_length;
20680 }
20681
20682 /**
20683 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
20684 * @phba: pointer to lpfc hba data structure.
20685 *
20686  * This function reads region 23 and parses the TLVs for port status to
20687  * decide if the user disabled the port. If the TLV indicates the
20688 * port is disabled, the hba_flag is set accordingly.
20689 **/
20690 void
20691 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
20692 {
20693 uint8_t *rgn23_data = NULL;
20694 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
20695 uint32_t offset = 0;
20696
20697 /* Get adapter Region 23 data */
20698 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
20699 if (!rgn23_data)
20700 goto out;
20701
20702 if (phba->sli_rev < LPFC_SLI_REV4)
20703 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
20704 else {
20705 if_type = bf_get(lpfc_sli_intf_if_type,
20706 &phba->sli4_hba.sli_intf);
20707 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
20708 goto out;
20709 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
20710 }
20711
20712 if (!data_size)
20713 goto out;
20714
20715 /* Check the region signature first */
20716 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
20717 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20718 "2619 Config region 23 has bad signature\n");
20719 goto out;
20720 }
20721 offset += 4;
20722
20723 /* Check the data structure version */
20724 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
20725 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20726 "2620 Config region 23 has bad version\n");
20727 goto out;
20728 }
20729 offset += 4;
20730
20731 /* Parse TLV entries in the region */
20732 while (offset < data_size) {
20733 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
20734 break;
20735 /*
20736 		 * If the TLV is not a driver-specific TLV or the driver id is
20737 		 * not the Linux driver id, skip the record.
20738 */
20739 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
20740 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
20741 (rgn23_data[offset + 3] != 0)) {
20742 offset += rgn23_data[offset + 1] * 4 + 4;
20743 continue;
20744 }
20745
20746 /* Driver found a driver specific TLV in the config region */
20747 sub_tlv_len = rgn23_data[offset + 1] * 4;
20748 offset += 4;
20749 tlv_offset = 0;
20750
20751 /*
20752 * Search for configured port state sub-TLV.
20753 */
20754 while ((offset < data_size) &&
20755 (tlv_offset < sub_tlv_len)) {
20756 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
20757 offset += 4;
20758 tlv_offset += 4;
20759 break;
20760 }
20761 if (rgn23_data[offset] != PORT_STE_TYPE) {
20762 				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
20763 				offset += rgn23_data[offset + 1] * 4 + 4;
20764 continue;
20765 }
20766
20767 /* This HBA contains PORT_STE configured */
20768 if (!rgn23_data[offset + 2])
20769 phba->hba_flag |= LINK_DISABLED;
20770
20771 goto out;
20772 }
20773 }
20774
20775 out:
20776 kfree(rgn23_data);
20777 return;
20778 }
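
/*
 * For reference, the region 23 layout parsed above, reconstructed from the
 * code (all offsets in bytes):
 *
 *	bytes 0-3:	region signature (LPFC_REGION23_SIGNATURE)
 *	bytes 4-7:	data structure version (LPFC_REGION23_VERSION in byte 4)
 *	then:		TLV records of [type, length-in-words, data...],
 *			terminated by an LPFC_REGION23_LAST_REC type. A
 *			driver-specific record (DRIVER_SPECIFIC_TYPE with
 *			LINUX_DRIVER_ID) may carry a PORT_STE_TYPE sub-TLV
 *			whose first data byte (offset + 2), when zero, marks
 *			the link disabled.
 */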
20779
20780 /**
20781 * lpfc_log_fw_write_cmpl - logs firmware write completion status
20782 * @phba: pointer to lpfc hba data structure
20783 * @shdr_status: wr_object rsp's status field
20784 * @shdr_add_status: wr_object rsp's add_status field
20785 * @shdr_add_status_2: wr_object rsp's add_status_2 field
20786 * @shdr_change_status: wr_object rsp's change_status field
20787 * @shdr_csf: wr_object rsp's csf bit
20788 *
20789 * This routine is intended to be called after a firmware write completes.
20790 * It will log next action items to be performed by the user to instantiate
20791 * the newly downloaded firmware or reason for incompatibility.
20792 **/
20793 static void
20794 lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status,
20795 u32 shdr_add_status, u32 shdr_add_status_2,
20796 u32 shdr_change_status, u32 shdr_csf)
20797 {
20798 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20799 "4198 %s: flash_id x%02x, asic_rev x%02x, "
20800 "status x%02x, add_status x%02x, add_status_2 x%02x, "
20801 "change_status x%02x, csf %01x\n", __func__,
20802 phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev,
20803 shdr_status, shdr_add_status, shdr_add_status_2,
20804 shdr_change_status, shdr_csf);
20805
20806 if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) {
20807 switch (shdr_add_status_2) {
20808 case LPFC_ADD_STATUS_2_INCOMPAT_FLASH:
20809 lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20810 "4199 Firmware write failed: "
20811 "image incompatible with flash x%02x\n",
20812 phba->sli4_hba.flash_id);
20813 break;
20814 case LPFC_ADD_STATUS_2_INCORRECT_ASIC:
20815 lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20816 "4200 Firmware write failed: "
20817 "image incompatible with ASIC "
20818 "architecture x%02x\n",
20819 phba->sli4_hba.asic_rev);
20820 break;
20821 default:
20822 lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20823 "4210 Firmware write failed: "
20824 "add_status_2 x%02x\n",
20825 shdr_add_status_2);
20826 break;
20827 }
20828 } else if (!shdr_status && !shdr_add_status) {
20829 if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
20830 shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
20831 if (shdr_csf)
20832 shdr_change_status =
20833 LPFC_CHANGE_STATUS_PCI_RESET;
20834 }
20835
20836 switch (shdr_change_status) {
20837 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
20838 lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI,
20839 "3198 Firmware write complete: System "
20840 "reboot required to instantiate\n");
20841 break;
20842 case (LPFC_CHANGE_STATUS_FW_RESET):
20843 lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI,
20844 "3199 Firmware write complete: "
20845 "Firmware reset required to "
20846 "instantiate\n");
20847 break;
20848 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
20849 lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI,
20850 "3200 Firmware write complete: Port "
20851 "Migration or PCI Reset required to "
20852 "instantiate\n");
20853 break;
20854 case (LPFC_CHANGE_STATUS_PCI_RESET):
20855 lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI,
20856 "3201 Firmware write complete: PCI "
20857 "Reset required to instantiate\n");
20858 break;
20859 default:
20860 break;
20861 }
20862 }
20863 }
20864
20865 /**
20866 * lpfc_wr_object - write an object to the firmware
20867  * @phba: HBA structure that indicates the port to write the object to.
20868 * @dmabuf_list: list of dmabufs to write to the port.
20869 * @size: the total byte value of the objects to write to the port.
20870 * @offset: the current offset to be used to start the transfer.
20871 *
20872 * This routine will create a wr_object mailbox command to send to the port.
20873  * The mailbox command will be constructed using the dma buffers described in
20874  * @dmabuf_list to create a list of BDEs. This routine will fill in as many
20875  * BDEs as the embedded mailbox can support. The @offset variable will be
20876  * used to indicate the starting offset of the transfer and will also return
20877  * the offset after the write object mailbox has completed. @size is used to
20878  * determine the end of the object and whether the eof bit should be set.
20879  *
20880  * Return 0 if successful; @offset will contain the new offset to use
20881  * for the next write.
20882  * Return a negative value for error cases.
20883 **/
20884 int
20885 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
20886 uint32_t size, uint32_t *offset)
20887 {
20888 struct lpfc_mbx_wr_object *wr_object;
20889 LPFC_MBOXQ_t *mbox;
20890 int rc = 0, i = 0;
20891 int mbox_status = 0;
20892 uint32_t shdr_status, shdr_add_status, shdr_add_status_2;
20893 uint32_t shdr_change_status = 0, shdr_csf = 0;
20894 uint32_t mbox_tmo;
20895 struct lpfc_dmabuf *dmabuf;
20896 uint32_t written = 0;
20897 bool check_change_status = false;
20898
20899 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20900 if (!mbox)
20901 return -ENOMEM;
20902
20903 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
20904 LPFC_MBOX_OPCODE_WRITE_OBJECT,
20905 sizeof(struct lpfc_mbx_wr_object) -
20906 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
20907
20908 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
20909 wr_object->u.request.write_offset = *offset;
20910 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
20911 wr_object->u.request.object_name[0] =
20912 cpu_to_le32(wr_object->u.request.object_name[0]);
20913 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
20914 list_for_each_entry(dmabuf, dmabuf_list, list) {
20915 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
20916 break;
20917 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
20918 wr_object->u.request.bde[i].addrHigh =
20919 putPaddrHigh(dmabuf->phys);
20920 if (written + SLI4_PAGE_SIZE >= size) {
20921 wr_object->u.request.bde[i].tus.f.bdeSize =
20922 (size - written);
20923 written += (size - written);
20924 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
20925 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
20926 check_change_status = true;
20927 } else {
20928 wr_object->u.request.bde[i].tus.f.bdeSize =
20929 SLI4_PAGE_SIZE;
20930 written += SLI4_PAGE_SIZE;
20931 }
20932 i++;
20933 }
20934 wr_object->u.request.bde_count = i;
20935 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
20936 if (!phba->sli4_hba.intr_enable)
20937 mbox_status = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
20938 else {
20939 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
20940 mbox_status = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
20941 }
20942
20943 /* The mbox status needs to be maintained to detect MBOX_TIMEOUT. */
20944 rc = mbox_status;
20945
20946 /* The IOCTL status is embedded in the mailbox subheader. */
20947 shdr_status = bf_get(lpfc_mbox_hdr_status,
20948 &wr_object->header.cfg_shdr.response);
20949 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20950 &wr_object->header.cfg_shdr.response);
20951 shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2,
20952 &wr_object->header.cfg_shdr.response);
20953 if (check_change_status) {
20954 shdr_change_status = bf_get(lpfc_wr_object_change_status,
20955 &wr_object->u.response);
20956 shdr_csf = bf_get(lpfc_wr_object_csf,
20957 &wr_object->u.response);
20958 }
20959
20960 if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
20961 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20962 "3025 Write Object mailbox failed with "
20963 "status x%x add_status x%x, add_status_2 x%x, "
20964 "mbx status x%x\n",
20965 shdr_status, shdr_add_status, shdr_add_status_2,
20966 rc);
20967 rc = -ENXIO;
20968 *offset = shdr_add_status;
20969 } else {
20970 *offset += wr_object->u.response.actual_write_length;
20971 }
20972
20973 if (rc || check_change_status)
20974 lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
20975 shdr_add_status_2, shdr_change_status,
20976 shdr_csf);
20977
20978 if (!phba->sli4_hba.intr_enable)
20979 mempool_free(mbox, phba->mbox_mem_pool);
20980 else if (mbox_status != MBX_TIMEOUT)
20981 mempool_free(mbox, phba->mbox_mem_pool);
20982
20983 return rc;
20984 }
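
/*
 * Illustrative caller sketch (not driver code): lpfc_wr_object() writes at
 * most one embedded mailbox worth of BDEs per call and advances @offset on
 * success, so a large image is downloaded by looping until the full size
 * has been written. fw_dmabuf_list and fw_size are hypothetical names.
 *
 *	uint32_t offset = 0;
 *	int rc = 0;
 *
 *	while (offset < fw_size && !rc)
 *		rc = lpfc_wr_object(phba, &fw_dmabuf_list, fw_size, &offset);
 */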
20985
20986 /**
20987 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
20988 * @vport: pointer to vport data structure.
20989 *
20990  * This function iterates through the mailboxq and cleans up all REG_LOGIN
20991  * and REG_VPI mailbox commands associated with the vport. It is called
20992  * when the driver wants to restart discovery of the vport due to a
20993  * Clear Virtual Link event.
20994 **/
20995 void
20996 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
20997 {
20998 struct lpfc_hba *phba = vport->phba;
20999 LPFC_MBOXQ_t *mb, *nextmb;
21000 struct lpfc_nodelist *ndlp;
21001 struct lpfc_nodelist *act_mbx_ndlp = NULL;
21002 LIST_HEAD(mbox_cmd_list);
21003 uint8_t restart_loop;
21004
21005 /* Clean up internally queued mailbox commands with the vport */
21006 spin_lock_irq(&phba->hbalock);
21007 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
21008 if (mb->vport != vport)
21009 continue;
21010
21011 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
21012 (mb->u.mb.mbxCommand != MBX_REG_VPI))
21013 continue;
21014
21015 list_move_tail(&mb->list, &mbox_cmd_list);
21016 }
21017 /* Clean up active mailbox command with the vport */
21018 mb = phba->sli.mbox_active;
21019 if (mb && (mb->vport == vport)) {
21020 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
21021 (mb->u.mb.mbxCommand == MBX_REG_VPI))
21022 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
21023 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
21024 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
21025
21026 /* This reference is local to this routine. The
21027 * reference is removed at routine exit.
21028 */
21029 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
21030
21031 /* Unregister the RPI when mailbox complete */
21032 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
21033 }
21034 }
21035 /* Cleanup any mailbox completions which are not yet processed */
21036 do {
21037 restart_loop = 0;
21038 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
21039 /*
21040 			 * If this mailbox is already processed or is
21041 			 * for another vport, ignore it.
21042 */
21043 if ((mb->vport != vport) ||
21044 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
21045 continue;
21046
21047 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
21048 (mb->u.mb.mbxCommand != MBX_REG_VPI))
21049 continue;
21050
21051 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
21052 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
21053 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
21054 /* Unregister the RPI when mailbox complete */
21055 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
21056 restart_loop = 1;
21057 spin_unlock_irq(&phba->hbalock);
21058 spin_lock(&ndlp->lock);
21059 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
21060 spin_unlock(&ndlp->lock);
21061 spin_lock_irq(&phba->hbalock);
21062 break;
21063 }
21064 }
21065 } while (restart_loop);
21066
21067 spin_unlock_irq(&phba->hbalock);
21068
21069 /* Release the cleaned-up mailbox commands */
21070 while (!list_empty(&mbox_cmd_list)) {
21071 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
21072 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
21073 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
21074 mb->ctx_ndlp = NULL;
21075 if (ndlp) {
21076 spin_lock(&ndlp->lock);
21077 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
21078 spin_unlock(&ndlp->lock);
21079 lpfc_nlp_put(ndlp);
21080 }
21081 }
21082 lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_UNLOCKED);
21083 }
21084
21085 /* Release the ndlp with the cleaned-up active mailbox command */
21086 if (act_mbx_ndlp) {
21087 spin_lock(&act_mbx_ndlp->lock);
21088 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
21089 spin_unlock(&act_mbx_ndlp->lock);
21090 lpfc_nlp_put(act_mbx_ndlp);
21091 }
21092 }
21093
21094 /**
21095 * lpfc_drain_txq - Drain the txq
21096 * @phba: Pointer to HBA context object.
21097 *
21098  * This function attempts to submit IOCBs from the txq
21099  * to the adapter. For SLI4 adapters, the txq contains
21100  * ELS IOCBs that have been deferred because there
21101  * are no available SGLs. This congestion can occur with large
21102 * vport counts during node discovery.
21103 **/
21104
21105 uint32_t
21106 lpfc_drain_txq(struct lpfc_hba *phba)
21107 {
21108 LIST_HEAD(completions);
21109 struct lpfc_sli_ring *pring;
21110 struct lpfc_iocbq *piocbq = NULL;
21111 unsigned long iflags = 0;
21112 char *fail_msg = NULL;
21113 uint32_t txq_cnt = 0;
21114 struct lpfc_queue *wq;
21115 int ret = 0;
21116
21117 if (phba->link_flag & LS_MDS_LOOPBACK) {
21118 		/* MDS WQEs are posted only to the first WQ */
21119 wq = phba->sli4_hba.hdwq[0].io_wq;
21120 if (unlikely(!wq))
21121 return 0;
21122 pring = wq->pring;
21123 } else {
21124 wq = phba->sli4_hba.els_wq;
21125 if (unlikely(!wq))
21126 return 0;
21127 pring = lpfc_phba_elsring(phba);
21128 }
21129
21130 if (unlikely(!pring) || list_empty(&pring->txq))
21131 return 0;
21132
21133 spin_lock_irqsave(&pring->ring_lock, iflags);
21134 list_for_each_entry(piocbq, &pring->txq, list) {
21135 txq_cnt++;
21136 }
21137
21138 if (txq_cnt > pring->txq_max)
21139 pring->txq_max = txq_cnt;
21140
21141 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21142
21143 while (!list_empty(&pring->txq)) {
21144 spin_lock_irqsave(&pring->ring_lock, iflags);
21145
21146 piocbq = lpfc_sli_ringtx_get(phba, pring);
21147 if (!piocbq) {
21148 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21149 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
21150 "2823 txq empty and txq_cnt is %d\n ",
21151 txq_cnt);
21152 break;
21153 }
21154 txq_cnt--;
21155
21156 ret = __lpfc_sli_issue_iocb(phba, pring->ringno, piocbq, 0);
21157
21158 if (ret && ret != IOCB_BUSY) {
21159 fail_msg = " - Cannot send IO ";
21160 piocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
21161 }
21162 if (fail_msg) {
21163 piocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
21164 /* Failed means we can't issue and need to cancel */
21165 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
21166 "2822 IOCB failed %s iotag 0x%x "
21167 "xri 0x%x %d flg x%x\n",
21168 fail_msg, piocbq->iotag,
21169 piocbq->sli4_xritag, ret,
21170 piocbq->cmd_flag);
21171 list_add_tail(&piocbq->list, &completions);
21172 fail_msg = NULL;
21173 }
21174 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21175 if (txq_cnt == 0 || ret == IOCB_BUSY)
21176 break;
21177 }
21178 /* Cancel all the IOCBs that cannot be issued */
21179 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
21180 IOERR_SLI_ABORTED);
21181
21182 return txq_cnt;
21183 }
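
/*
 * Illustrative sketch (assumed call site, not from this file): a caller
 * that has just freed SGL resources can retry the deferred ELS traffic by
 * draining the txq; the return value is roughly the number of entries
 * still queued when the loop stopped.
 *
 *	uint32_t remaining;
 *
 *	remaining = lpfc_drain_txq(phba);
 *	if (remaining)
 *		pr_info("lpfc: %u IOCBs still deferred\n", remaining);
 */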
21184
21185 /**
21186 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
21187 * @phba: Pointer to HBA context object.
21188 * @pwqeq: Pointer to command WQE.
21189 * @sglq: Pointer to the scatter gather queue object.
21190 *
21191 * This routine converts the bpl or bde that is in the WQE
21192 * to a sgl list for the sli4 hardware. The physical address
21193 * of the bpl/bde is converted back to a virtual address.
21194  * If the WQE contains a BPL then the list of BDEs is
21195  * converted to sli4_sge's. If the WQE contains a single
21196  * BDE then it is converted to a single sli4_sge.
21197 * The WQE is still in cpu endianness so the contents of
21198 * the bpl can be used without byte swapping.
21199 *
21200 * Returns valid XRI = Success, NO_XRI = Failure.
21201 */
21202 static uint16_t
21203 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
21204 struct lpfc_sglq *sglq)
21205 {
21206 uint16_t xritag = NO_XRI;
21207 struct ulp_bde64 *bpl = NULL;
21208 struct ulp_bde64 bde;
21209 struct sli4_sge *sgl = NULL;
21210 struct lpfc_dmabuf *dmabuf;
21211 union lpfc_wqe128 *wqe;
21212 int numBdes = 0;
21213 int i = 0;
21214 uint32_t offset = 0; /* accumulated offset in the sg request list */
21215 int inbound = 0; /* number of sg reply entries inbound from firmware */
21216 uint32_t cmd;
21217
21218 if (!pwqeq || !sglq)
21219 return xritag;
21220
21221 sgl = (struct sli4_sge *)sglq->sgl;
21222 wqe = &pwqeq->wqe;
21223 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
21224
21225 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
21226 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
21227 return sglq->sli4_xritag;
21228 numBdes = pwqeq->num_bdes;
21229 if (numBdes) {
21230 /* The addrHigh and addrLow fields within the WQE
21231 * have not been byteswapped yet so there is no
21232 * need to swap them back.
21233 */
21234 if (pwqeq->bpl_dmabuf)
21235 dmabuf = pwqeq->bpl_dmabuf;
21236 else
21237 return xritag;
21238
21239 bpl = (struct ulp_bde64 *)dmabuf->virt;
21240 if (!bpl)
21241 return xritag;
21242
21243 for (i = 0; i < numBdes; i++) {
21244 /* Should already be byte swapped. */
21245 sgl->addr_hi = bpl->addrHigh;
21246 sgl->addr_lo = bpl->addrLow;
21247
21248 sgl->word2 = le32_to_cpu(sgl->word2);
21249 if ((i+1) == numBdes)
21250 bf_set(lpfc_sli4_sge_last, sgl, 1);
21251 else
21252 bf_set(lpfc_sli4_sge_last, sgl, 0);
21253 /* swap the size field back to the cpu so we
21254 * can assign it to the sgl.
21255 */
21256 bde.tus.w = le32_to_cpu(bpl->tus.w);
21257 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
21258 /* The offsets in the sgl need to be accumulated
21259 * separately for the request and reply lists.
21260 * The request is always first, the reply follows.
21261 */
21262 switch (cmd) {
21263 case CMD_GEN_REQUEST64_WQE:
21264 /* add up the reply sg entries */
21265 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
21266 inbound++;
21267 /* first inbound? reset the offset */
21268 if (inbound == 1)
21269 offset = 0;
21270 bf_set(lpfc_sli4_sge_offset, sgl, offset);
21271 bf_set(lpfc_sli4_sge_type, sgl,
21272 LPFC_SGE_TYPE_DATA);
21273 offset += bde.tus.f.bdeSize;
21274 break;
21275 case CMD_FCP_TRSP64_WQE:
21276 bf_set(lpfc_sli4_sge_offset, sgl, 0);
21277 bf_set(lpfc_sli4_sge_type, sgl,
21278 LPFC_SGE_TYPE_DATA);
21279 break;
21280 case CMD_FCP_TSEND64_WQE:
21281 case CMD_FCP_TRECEIVE64_WQE:
21282 bf_set(lpfc_sli4_sge_type, sgl,
21283 bpl->tus.f.bdeFlags);
21284 if (i < 3)
21285 offset = 0;
21286 else
21287 offset += bde.tus.f.bdeSize;
21288 bf_set(lpfc_sli4_sge_offset, sgl, offset);
21289 break;
21290 }
21291 sgl->word2 = cpu_to_le32(sgl->word2);
21292 bpl++;
21293 sgl++;
21294 }
21295 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
21296 /* The addrHigh and addrLow fields of the BDE have not
21297 * been byteswapped yet so they need to be swapped
21298 * before putting them in the sgl.
21299 */
21300 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
21301 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
21302 sgl->word2 = le32_to_cpu(sgl->word2);
21303 bf_set(lpfc_sli4_sge_last, sgl, 1);
21304 sgl->word2 = cpu_to_le32(sgl->word2);
21305 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
21306 }
21307 return sglq->sli4_xritag;
21308 }
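
/*
 * Minimal per-entry sketch of the BPL-to-SGL conversion above, showing the
 * endianness handling: the address words are already little endian, word2
 * is swapped to CPU order only while bf_set() updates it, and the size is
 * swapped to CPU order before being assigned to the SGE.
 *
 *	sgl->addr_hi = bpl->addrHigh;
 *	sgl->addr_lo = bpl->addrLow;
 *	sgl->word2 = le32_to_cpu(sgl->word2);
 *	bf_set(lpfc_sli4_sge_last, sgl, 1);
 *	sgl->word2 = cpu_to_le32(sgl->word2);
 *	bde.tus.w = le32_to_cpu(bpl->tus.w);
 *	sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
 */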
21309
21310 /**
21311 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
21312 * @phba: Pointer to HBA context object.
21313 * @qp: Pointer to HDW queue.
21314 * @pwqe: Pointer to command WQE.
21315 **/
21316 int
21317 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
21318 struct lpfc_iocbq *pwqe)
21319 {
21320 union lpfc_wqe128 *wqe = &pwqe->wqe;
21321 struct lpfc_async_xchg_ctx *ctxp;
21322 struct lpfc_queue *wq;
21323 struct lpfc_sglq *sglq;
21324 struct lpfc_sli_ring *pring;
21325 unsigned long iflags;
21326 uint32_t ret = 0;
21327
21328 /* NVME_LS and NVME_LS ABTS requests. */
21329 if (pwqe->cmd_flag & LPFC_IO_NVME_LS) {
21330 pring = phba->sli4_hba.nvmels_wq->pring;
21331 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21332 qp, wq_access);
21333 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
21334 if (!sglq) {
21335 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21336 return WQE_BUSY;
21337 }
21338 pwqe->sli4_lxritag = sglq->sli4_lxritag;
21339 pwqe->sli4_xritag = sglq->sli4_xritag;
21340 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
21341 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21342 return WQE_ERROR;
21343 }
21344 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
21345 pwqe->sli4_xritag);
21346 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
21347 if (ret) {
21348 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21349 return ret;
21350 }
21351
21352 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21353 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21354
21355 lpfc_sli4_poll_eq(qp->hba_eq);
21356 return 0;
21357 }
21358
21359 /* NVME_FCREQ and NVME_ABTS requests */
21360 if (pwqe->cmd_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
21361 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
21362 wq = qp->io_wq;
21363 pring = wq->pring;
21364
21365 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21366
21367 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21368 qp, wq_access);
21369 ret = lpfc_sli4_wq_put(wq, wqe);
21370 if (ret) {
21371 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21372 return ret;
21373 }
21374 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21375 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21376
21377 lpfc_sli4_poll_eq(qp->hba_eq);
21378 return 0;
21379 }
21380
21381 /* NVMET requests */
21382 if (pwqe->cmd_flag & LPFC_IO_NVMET) {
21383 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
21384 wq = qp->io_wq;
21385 pring = wq->pring;
21386
21387 ctxp = pwqe->context_un.axchg;
21388 sglq = ctxp->ctxbuf->sglq;
21389 if (pwqe->sli4_xritag == NO_XRI) {
21390 pwqe->sli4_lxritag = sglq->sli4_lxritag;
21391 pwqe->sli4_xritag = sglq->sli4_xritag;
21392 }
21393 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
21394 pwqe->sli4_xritag);
21395 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21396
21397 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21398 qp, wq_access);
21399 ret = lpfc_sli4_wq_put(wq, wqe);
21400 if (ret) {
21401 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21402 return ret;
21403 }
21404 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21405 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21406
21407 lpfc_sli4_poll_eq(qp->hba_eq);
21408 return 0;
21409 }
21410 return WQE_ERROR;
21411 }
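
/*
 * Illustrative caller sketch: lpfc_sli4_issue_wqe() returns 0 on success,
 * WQE_BUSY when resources such as an ELS sglq are exhausted, and WQE_ERROR
 * otherwise, so a WQE_BUSY result is usually deferred rather than failed.
 * defer_wqe_retry() and fail_wqe() are hypothetical helpers.
 *
 *	ret = lpfc_sli4_issue_wqe(phba, qp, pwqe);
 *	if (ret == WQE_BUSY)
 *		defer_wqe_retry(pwqe);
 *	else if (ret)
 *		fail_wqe(pwqe);
 */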
21412
21413 /**
21414 * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort
21415 * @phba: Pointer to HBA context object.
21416 * @cmdiocb: Pointer to driver command iocb object.
21417 * @cmpl: completion function.
21418 *
21419 * Fill the appropriate fields for the abort WQE and call
21420 * internal routine lpfc_sli4_issue_wqe to send the WQE
21421 * This function is called with hbalock held and no ring_lock held.
21422 *
21423 * RETURNS 0 - SUCCESS
21424 **/
21425
21426 int
21427 lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
21428 void *cmpl)
21429 {
21430 struct lpfc_vport *vport = cmdiocb->vport;
21431 struct lpfc_iocbq *abtsiocb = NULL;
21432 union lpfc_wqe128 *abtswqe;
21433 struct lpfc_io_buf *lpfc_cmd;
21434 int retval = IOCB_ERROR;
21435 u16 xritag = cmdiocb->sli4_xritag;
21436
21437 /*
21438 	 * The SCSI command is not in the txq; it is in flight because
21439 	 * pCmd is still pointing at the SCSI command we have to abort. There
21440 	 * is no need to search the txcmplq. Just send an abort to the FW.
21441 */
21442
21443 abtsiocb = __lpfc_sli_get_iocbq(phba);
21444 if (!abtsiocb)
21445 return WQE_NORESOURCE;
21446
21447 /* Indicate the IO is being aborted by the driver. */
21448 cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
21449
21450 abtswqe = &abtsiocb->wqe;
21451 memset(abtswqe, 0, sizeof(*abtswqe));
21452
21453 if (!lpfc_is_link_up(phba) || (phba->link_flag & LS_EXTERNAL_LOOPBACK))
21454 bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
21455 bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
21456 abtswqe->abort_cmd.rsrvd5 = 0;
21457 abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
21458 bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag);
21459 bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
21460 bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0);
21461 bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1);
21462 bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
21463 bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND);
21464
21465 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
21466 abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
21467 abtsiocb->cmd_flag |= LPFC_USE_FCPWQIDX;
21468 if (cmdiocb->cmd_flag & LPFC_IO_FCP)
21469 abtsiocb->cmd_flag |= LPFC_IO_FCP;
21470 if (cmdiocb->cmd_flag & LPFC_IO_NVME)
21471 abtsiocb->cmd_flag |= LPFC_IO_NVME;
21472 if (cmdiocb->cmd_flag & LPFC_IO_FOF)
21473 abtsiocb->cmd_flag |= LPFC_IO_FOF;
21474 abtsiocb->vport = vport;
21475 abtsiocb->cmd_cmpl = cmpl;
21476
21477 lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
21478 retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
21479
21480 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21481 "0359 Abort xri x%x, original iotag x%x, "
21482 "abort cmd iotag x%x retval x%x\n",
21483 xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
21484
21485 if (retval) {
21486 cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
21487 __lpfc_sli_release_iocbq(phba, abtsiocb);
21488 }
21489
21490 return retval;
21491 }
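
/*
 * Illustrative usage (hypothetical call site): per the header comment the
 * routine expects the hbalock to be held, and the supplied completion runs
 * when the abort WQE completes. my_abort_cmpl is an assumed callback, not
 * a routine defined in this driver.
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	retval = lpfc_sli4_issue_abort_iotag(phba, cmdiocb, my_abort_cmpl);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 */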
21492
21493 #ifdef LPFC_MXP_STAT
21494 /**
21495 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
21496 * @phba: pointer to lpfc hba data structure.
21497  * @hwqid: index of the HWQ this snapshot belongs to.
21498  *
21499  * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
21500  * 15 seconds after a test case starts running.
21501  *
21502  * The user should call lpfc_debugfs_multixripools_write before running a test
21503  * case to clear stat_snapshot_taken. Then the user starts a test case. While
21504  * the test case is running, stat_snapshot_taken is incremented by 1 each time
21505  * this routine is called from the heartbeat timer. When stat_snapshot_taken is
21506  * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
21507 **/
21508 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
21509 {
21510 struct lpfc_sli4_hdw_queue *qp;
21511 struct lpfc_multixri_pool *multixri_pool;
21512 struct lpfc_pvt_pool *pvt_pool;
21513 struct lpfc_pbl_pool *pbl_pool;
21514 u32 txcmplq_cnt;
21515
21516 qp = &phba->sli4_hba.hdwq[hwqid];
21517 multixri_pool = qp->p_multixri_pool;
21518 if (!multixri_pool)
21519 return;
21520
21521 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
21522 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21523 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21524 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21525
21526 multixri_pool->stat_pbl_count = pbl_pool->count;
21527 multixri_pool->stat_pvt_count = pvt_pool->count;
21528 multixri_pool->stat_busy_count = txcmplq_cnt;
21529 }
21530
21531 multixri_pool->stat_snapshot_taken++;
21532 }
21533 #endif
21534
21535 /**
21536 * lpfc_adjust_pvt_pool_count - Adjust private pool count
21537 * @phba: pointer to lpfc hba data structure.
21538  * @hwqid: index of the HWQ whose pools are adjusted.
21539 *
21540 * This routine moves some XRIs from private to public pool when private pool
21541 * is not busy.
21542 **/
21543 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
21544 {
21545 struct lpfc_multixri_pool *multixri_pool;
21546 u32 io_req_count;
21547 u32 prev_io_req_count;
21548
21549 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21550 if (!multixri_pool)
21551 return;
21552 io_req_count = multixri_pool->io_req_count;
21553 prev_io_req_count = multixri_pool->prev_io_req_count;
21554
21555 if (prev_io_req_count != io_req_count) {
21556 /* Private pool is busy */
21557 multixri_pool->prev_io_req_count = io_req_count;
21558 } else {
21559 /* Private pool is not busy.
21560 * Move XRIs from private to public pool.
21561 */
21562 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
21563 }
21564 }
21565
21566 /**
21567 * lpfc_adjust_high_watermark - Adjust high watermark
21568 * @phba: pointer to lpfc hba data structure.
21569  * @hwqid: index of the HWQ whose watermark is adjusted.
21570  *
21571  * This routine sets the high watermark to the number of outstanding XRIs,
21572  * clamping the new value between xri_limit/2 and xri_limit.
21573 **/
21574 void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
21575 {
21576 u32 new_watermark;
21577 u32 watermark_max;
21578 u32 watermark_min;
21579 u32 xri_limit;
21580 u32 txcmplq_cnt;
21581 u32 abts_io_bufs;
21582 struct lpfc_multixri_pool *multixri_pool;
21583 struct lpfc_sli4_hdw_queue *qp;
21584
21585 qp = &phba->sli4_hba.hdwq[hwqid];
21586 multixri_pool = qp->p_multixri_pool;
21587 if (!multixri_pool)
21588 return;
21589 xri_limit = multixri_pool->xri_limit;
21590
21591 watermark_max = xri_limit;
21592 watermark_min = xri_limit / 2;
21593
21594 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21595 abts_io_bufs = qp->abts_scsi_io_bufs;
21596 abts_io_bufs += qp->abts_nvme_io_bufs;
21597
21598 new_watermark = txcmplq_cnt + abts_io_bufs;
21599 new_watermark = min(watermark_max, new_watermark);
21600 new_watermark = max(watermark_min, new_watermark);
21601 multixri_pool->pvt_pool.high_watermark = new_watermark;
21602
21603 #ifdef LPFC_MXP_STAT
21604 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
21605 new_watermark);
21606 #endif
21607 }
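
/*
 * Worked example of the clamp above: with xri_limit = 512 the watermark is
 * kept within [256, 512]. If txcmplq_cnt + abts_io_bufs = 100 the result
 * is max(256, min(512, 100)) = 256; if the sum is 600 the result is
 * max(256, min(512, 600)) = 512.
 */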
21608
21609 /**
21610 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
21611 * @phba: pointer to lpfc hba data structure.
21612  * @hwqid: index of the HWQ whose pools are used.
21613  *
21614  * This routine is called from the heartbeat timer when pvt_pool is idle.
21615  * All free XRIs are moved from the private to the public pool on hwqid in
21616  * 2 steps: if the count exceeds the low watermark, (all - low_watermark)
21617  * XRIs are moved; otherwise all of them are moved.
21618 **/
21619 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
21620 {
21621 struct lpfc_pbl_pool *pbl_pool;
21622 struct lpfc_pvt_pool *pvt_pool;
21623 struct lpfc_sli4_hdw_queue *qp;
21624 struct lpfc_io_buf *lpfc_ncmd;
21625 struct lpfc_io_buf *lpfc_ncmd_next;
21626 unsigned long iflag;
21627 struct list_head tmp_list;
21628 u32 tmp_count;
21629
21630 qp = &phba->sli4_hba.hdwq[hwqid];
21631 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21632 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21633 tmp_count = 0;
21634
21635 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
21636 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
21637
21638 if (pvt_pool->count > pvt_pool->low_watermark) {
21639 /* Step 1: move (all - low_watermark) from pvt_pool
21640 * to pbl_pool
21641 */
21642
21643 /* Move low watermark of bufs from pvt_pool to tmp_list */
21644 INIT_LIST_HEAD(&tmp_list);
21645 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21646 &pvt_pool->list, list) {
21647 list_move_tail(&lpfc_ncmd->list, &tmp_list);
21648 tmp_count++;
21649 if (tmp_count >= pvt_pool->low_watermark)
21650 break;
21651 }
21652
21653 /* Move all bufs from pvt_pool to pbl_pool */
21654 list_splice_init(&pvt_pool->list, &pbl_pool->list);
21655
21656 /* Move all bufs from tmp_list to pvt_pool */
21657 list_splice(&tmp_list, &pvt_pool->list);
21658
21659 pbl_pool->count += (pvt_pool->count - tmp_count);
21660 pvt_pool->count = tmp_count;
21661 } else {
21662 /* Step 2: move the rest from pvt_pool to pbl_pool */
21663 list_splice_init(&pvt_pool->list, &pbl_pool->list);
21664 pbl_pool->count += pvt_pool->count;
21665 pvt_pool->count = 0;
21666 }
21667
21668 spin_unlock(&pvt_pool->lock);
21669 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21670 }
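
/*
 * Worked example of the move above: with pvt_pool->count = 100 and
 * low_watermark = 20, the first branch parks 20 bufs on tmp_list, splices
 * the remaining 80 to pbl_pool, then restores the 20 to pvt_pool, leaving
 * pbl_pool->count += 80 and pvt_pool->count = 20. If the count were
 * already at or below 20, the else branch would move all of them instead.
 */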
21671
21672 /**
21673 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
21674 * @phba: pointer to lpfc hba data structure
21675 * @qp: pointer to HDW queue
21676 * @pbl_pool: specified public free XRI pool
21677 * @pvt_pool: specified private free XRI pool
21678 * @count: number of XRIs to move
21679 *
21680 * This routine tries to move some free common bufs from the specified pbl_pool
21681  * to the specified pvt_pool. It might move fewer than @count XRIs if there
21682  * are not enough in the public pool.
21683 *
21684 * Return:
21685 * true - if XRIs are successfully moved from the specified pbl_pool to the
21686 * specified pvt_pool
21687 * false - if the specified pbl_pool is empty or locked by someone else
21688 **/
21689 static bool
21690 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
21691 struct lpfc_pbl_pool *pbl_pool,
21692 struct lpfc_pvt_pool *pvt_pool, u32 count)
21693 {
21694 struct lpfc_io_buf *lpfc_ncmd;
21695 struct lpfc_io_buf *lpfc_ncmd_next;
21696 unsigned long iflag;
21697 int ret;
21698
21699 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
21700 if (ret) {
21701 if (pbl_pool->count) {
21702 /* Move a batch of XRIs from public to private pool */
21703 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
21704 list_for_each_entry_safe(lpfc_ncmd,
21705 lpfc_ncmd_next,
21706 &pbl_pool->list,
21707 list) {
21708 list_move_tail(&lpfc_ncmd->list,
21709 &pvt_pool->list);
21710 pvt_pool->count++;
21711 pbl_pool->count--;
21712 count--;
21713 if (count == 0)
21714 break;
21715 }
21716
21717 spin_unlock(&pvt_pool->lock);
21718 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21719 return true;
21720 }
21721 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21722 }
21723
21724 return false;
21725 }
21726
21727 /**
21728 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
21729 * @phba: pointer to lpfc hba data structure.
21730  * @hwqid: index of the HWQ whose private pool is refilled.
21731  * @count: number of XRIs to move
21732  *
21733  * This routine tries to find some free common bufs in one of the public
21734  * pools using a round-robin method. The search starts from the local hwqid,
21735  * then from the next HWQ found last time (rrb_next_hwqid). Once a non-empty
21736  * public pool is found, a batch of free common bufs is moved to the private
21737  * pool on hwqid. Fewer than @count XRIs may move if the pools run short.
21738 **/
21739 void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
21740 {
21741 struct lpfc_multixri_pool *multixri_pool;
21742 struct lpfc_multixri_pool *next_multixri_pool;
21743 struct lpfc_pvt_pool *pvt_pool;
21744 struct lpfc_pbl_pool *pbl_pool;
21745 struct lpfc_sli4_hdw_queue *qp;
21746 u32 next_hwqid;
21747 u32 hwq_count;
21748 int ret;
21749
21750 qp = &phba->sli4_hba.hdwq[hwqid];
21751 multixri_pool = qp->p_multixri_pool;
21752 pvt_pool = &multixri_pool->pvt_pool;
21753 pbl_pool = &multixri_pool->pbl_pool;
21754
21755 /* Check if local pbl_pool is available */
21756 ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
21757 if (ret) {
21758 #ifdef LPFC_MXP_STAT
21759 multixri_pool->local_pbl_hit_count++;
21760 #endif
21761 return;
21762 }
21763
21764 hwq_count = phba->cfg_hdw_queue;
21765
21766 /* Get the next hwqid which was found last time */
21767 next_hwqid = multixri_pool->rrb_next_hwqid;
21768
21769 do {
21770 /* Go to next hwq */
21771 next_hwqid = (next_hwqid + 1) % hwq_count;
21772
21773 next_multixri_pool =
21774 phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
21775 pbl_pool = &next_multixri_pool->pbl_pool;
21776
21777 /* Check if the public free xri pool is available */
21778 ret = _lpfc_move_xri_pbl_to_pvt(
21779 phba, qp, pbl_pool, pvt_pool, count);
21780
21781 /* Exit while-loop if success or all hwqid are checked */
21782 } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
21783
21784 /* Starting point for the next time */
21785 multixri_pool->rrb_next_hwqid = next_hwqid;
21786
21787 if (!ret) {
21788 		/* stats: all public pools are empty */
21789 multixri_pool->pbl_empty_count++;
21790 }
21791
21792 #ifdef LPFC_MXP_STAT
21793 if (ret) {
21794 if (next_hwqid == hwqid)
21795 multixri_pool->local_pbl_hit_count++;
21796 else
21797 multixri_pool->other_pbl_hit_count++;
21798 }
21799 #endif
21800 }
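
/*
 * Worked example of the round-robin walk above: with cfg_hdw_queue = 4,
 * hwqid = 1 and rrb_next_hwqid = 1, the local pbl_pool is tried first; if
 * it is empty the loop visits HWQs 2, 3, 0 and finally 1, stopping at the
 * first pool that yields bufs or once it wraps back to rrb_next_hwqid.
 */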
21801
21802 /**
21803 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
21804 * @phba: pointer to lpfc hba data structure.
21805  * @hwqid: index of the HWQ whose private pool is replenished.
21806  *
21807  * This routine gets a batch of XRIs from the pbl_pool if the pvt_pool
21808  * count is below the low watermark.
21809 **/
21810 void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
21811 {
21812 struct lpfc_multixri_pool *multixri_pool;
21813 struct lpfc_pvt_pool *pvt_pool;
21814
21815 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21816 pvt_pool = &multixri_pool->pvt_pool;
21817
21818 if (pvt_pool->count < pvt_pool->low_watermark)
21819 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21820 }
21821
21822 /**
21823 * lpfc_release_io_buf - Return one IO buf back to free pool
21824 * @phba: pointer to lpfc hba data structure.
21825 * @lpfc_ncmd: IO buf to be returned.
21826  * @qp: the HWQ the IO buf belongs to.
21827 *
21828 * This routine returns one IO buf back to free pool. If this is an urgent IO,
21829 * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1,
21830 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
21831 * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to
21832 * lpfc_io_buf_list_put.
21833 **/
21834 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
21835 struct lpfc_sli4_hdw_queue *qp)
21836 {
21837 unsigned long iflag;
21838 struct lpfc_pbl_pool *pbl_pool;
21839 struct lpfc_pvt_pool *pvt_pool;
21840 struct lpfc_epd_pool *epd_pool;
21841 u32 txcmplq_cnt;
21842 u32 xri_owned;
21843 u32 xri_limit;
21844 u32 abts_io_bufs;
21845
21846 /* MUST zero fields if buffer is reused by another protocol */
21847 lpfc_ncmd->nvmeCmd = NULL;
21848 lpfc_ncmd->cur_iocbq.cmd_cmpl = NULL;
21849
21850 if (phba->cfg_xpsgl && !phba->nvmet_support &&
21851 !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
21852 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
21853
21854 if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
21855 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
21856
21857 if (phba->cfg_xri_rebalancing) {
21858 if (lpfc_ncmd->expedite) {
21859 /* Return to expedite pool */
21860 epd_pool = &phba->epd_pool;
21861 spin_lock_irqsave(&epd_pool->lock, iflag);
21862 list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
21863 epd_pool->count++;
21864 spin_unlock_irqrestore(&epd_pool->lock, iflag);
21865 return;
21866 }
21867
21868 /* Avoid invalid access if an IO sneaks in and is being rejected
21869 * just _after_ xri pools are destroyed in lpfc_offline.
21870 * Nothing much can be done at this point.
21871 */
21872 if (!qp->p_multixri_pool)
21873 return;
21874
21875 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21876 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21877
21878 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21879 abts_io_bufs = qp->abts_scsi_io_bufs;
21880 abts_io_bufs += qp->abts_nvme_io_bufs;
21881
21882 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
21883 xri_limit = qp->p_multixri_pool->xri_limit;
21884
21885 #ifdef LPFC_MXP_STAT
21886 if (xri_owned <= xri_limit)
21887 qp->p_multixri_pool->below_limit_count++;
21888 else
21889 qp->p_multixri_pool->above_limit_count++;
21890 #endif
21891
21892 /* XRI goes to either public or private free xri pool
21893 * based on watermark and xri_limit
21894 */
21895 if ((pvt_pool->count < pvt_pool->low_watermark) ||
21896 (xri_owned < xri_limit &&
21897 pvt_pool->count < pvt_pool->high_watermark)) {
21898 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
21899 qp, free_pvt_pool);
21900 list_add_tail(&lpfc_ncmd->list,
21901 &pvt_pool->list);
21902 pvt_pool->count++;
21903 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21904 } else {
21905 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
21906 qp, free_pub_pool);
21907 list_add_tail(&lpfc_ncmd->list,
21908 &pbl_pool->list);
21909 pbl_pool->count++;
21910 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21911 }
21912 } else {
21913 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
21914 qp, free_xri);
21915 list_add_tail(&lpfc_ncmd->list,
21916 &qp->lpfc_io_buf_list_put);
21917 qp->put_io_bufs++;
21918 spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
21919 iflag);
21920 }
21921 }
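
/*
 * Summary of the routing decision above when xri rebalancing is enabled
 * (derived from the checks in this routine):
 *
 *	expedite IO                                  -> epd_pool
 *	pvt count below low watermark                -> pvt_pool
 *	owned below limit and below high watermark   -> pvt_pool
 *	otherwise                                    -> pbl_pool
 *
 * With rebalancing disabled, every buf returns to lpfc_io_buf_list_put.
 */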
21922
21923 /**
21924 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
21925 * @phba: pointer to lpfc hba data structure.
21926 * @qp: pointer to HDW queue
21927 * @pvt_pool: pointer to private pool data structure.
21928 * @ndlp: pointer to lpfc nodelist data structure.
21929 *
21930 * This routine tries to get one free IO buf from private pool.
21931 *
21932 * Return:
21933 * pointer to one free IO buf - if private pool is not empty
21934 * NULL - if private pool is empty
21935 **/
21936 static struct lpfc_io_buf *
21937 lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
21938 struct lpfc_sli4_hdw_queue *qp,
21939 struct lpfc_pvt_pool *pvt_pool,
21940 struct lpfc_nodelist *ndlp)
21941 {
21942 struct lpfc_io_buf *lpfc_ncmd;
21943 struct lpfc_io_buf *lpfc_ncmd_next;
21944 unsigned long iflag;
21945
21946 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
21947 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21948 &pvt_pool->list, list) {
21949 if (lpfc_test_rrq_active(
21950 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
21951 continue;
21952 list_del(&lpfc_ncmd->list);
21953 pvt_pool->count--;
21954 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21955 return lpfc_ncmd;
21956 }
21957 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21958
21959 return NULL;
21960 }
21961
21962 /**
21963 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
21964 * @phba: pointer to lpfc hba data structure.
21965 *
21966 * This routine tries to get one free IO buf from expedite pool.
21967 *
21968 * Return:
21969 * pointer to one free IO buf - if expedite pool is not empty
21970 * NULL - if expedite pool is empty
21971 **/
21972 static struct lpfc_io_buf *
21973 lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
21974 {
21975 struct lpfc_io_buf *lpfc_ncmd = NULL, *iter;
21976 struct lpfc_io_buf *lpfc_ncmd_next;
21977 unsigned long iflag;
21978 struct lpfc_epd_pool *epd_pool;
21979
21980 epd_pool = &phba->epd_pool;
21981
21982 spin_lock_irqsave(&epd_pool->lock, iflag);
21983 if (epd_pool->count > 0) {
21984 list_for_each_entry_safe(iter, lpfc_ncmd_next,
21985 &epd_pool->list, list) {
21986 list_del(&iter->list);
21987 epd_pool->count--;
21988 lpfc_ncmd = iter;
21989 break;
21990 }
21991 }
21992 spin_unlock_irqrestore(&epd_pool->lock, iflag);
21993
21994 return lpfc_ncmd;
21995 }
21996
21997 /**
21998 * lpfc_get_io_buf_from_multixri_pools - Get one free IO bufs
21999 * @phba: pointer to lpfc hba data structure.
22000 * @ndlp: pointer to lpfc nodelist data structure.
22001  * @hwqid: index of the HWQ to allocate from.
22002 * @expedite: 1 means this request is urgent.
22003 *
22004 * This routine will do the following actions and then return a pointer to
22005 * one free IO buf.
22006 *
22007 * 1. If private free xri count is empty, move some XRIs from public to
22008 * private pool.
22009 * 2. Get one XRI from private free xri pool.
22010 * 3. If we fail to get one from pvt_pool and this is an expedite request,
22011 * get one free xri from expedite pool.
22012 *
22013 * Note: ndlp is only used on SCSI side for RRQ testing.
22014 * The caller should pass NULL for ndlp on NVME side.
22015 *
22016 * Return:
22017 * pointer to one free IO buf - if private pool is not empty
22018 * NULL - if private pool is empty
22019 **/
22020 static struct lpfc_io_buf *
22021 lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
22022 struct lpfc_nodelist *ndlp,
22023 int hwqid, int expedite)
22024 {
22025 struct lpfc_sli4_hdw_queue *qp;
22026 struct lpfc_multixri_pool *multixri_pool;
22027 struct lpfc_pvt_pool *pvt_pool;
22028 struct lpfc_io_buf *lpfc_ncmd;
22029
22030 qp = &phba->sli4_hba.hdwq[hwqid];
22031 lpfc_ncmd = NULL;
22032 if (!qp) {
22033 lpfc_printf_log(phba, KERN_INFO,
22034 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
22035 "5556 NULL qp for hwqid x%x\n", hwqid);
22036 return lpfc_ncmd;
22037 }
22038 multixri_pool = qp->p_multixri_pool;
22039 if (!multixri_pool) {
22040 lpfc_printf_log(phba, KERN_INFO,
22041 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
22042 "5557 NULL multixri for hwqid x%x\n", hwqid);
22043 return lpfc_ncmd;
22044 }
22045 pvt_pool = &multixri_pool->pvt_pool;
22046 if (!pvt_pool) {
22047 lpfc_printf_log(phba, KERN_INFO,
22048 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
22049 "5558 NULL pvt_pool for hwqid x%x\n", hwqid);
22050 return lpfc_ncmd;
22051 }
22052 multixri_pool->io_req_count++;
22053
22054 /* If pvt_pool is empty, move some XRIs from public to private pool */
22055 if (pvt_pool->count == 0)
22056 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
22057
22058 /* Get one XRI from private free xri pool */
22059 lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
22060
22061 if (lpfc_ncmd) {
22062 lpfc_ncmd->hdwq = qp;
22063 lpfc_ncmd->hdwq_no = hwqid;
22064 } else if (expedite) {
22065 /* If we fail to get one from pvt_pool and this is an expedite
22066 * request, get one free xri from expedite pool.
22067 */
22068 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
22069 }
22070
22071 return lpfc_ncmd;
22072 }
22073
22074 static inline struct lpfc_io_buf *
22075 lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
22076 {
22077 struct lpfc_sli4_hdw_queue *qp;
22078 struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
22079
22080 qp = &phba->sli4_hba.hdwq[idx];
22081 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
22082 &qp->lpfc_io_buf_list_get, list) {
22083 if (lpfc_test_rrq_active(phba, ndlp,
22084 lpfc_cmd->cur_iocbq.sli4_lxritag))
22085 continue;
22086
22087 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
22088 continue;
22089
22090 list_del_init(&lpfc_cmd->list);
22091 qp->get_io_bufs--;
22092 lpfc_cmd->hdwq = qp;
22093 lpfc_cmd->hdwq_no = idx;
22094 return lpfc_cmd;
22095 }
22096 return NULL;
22097 }
22098
22099 /**
22100 * lpfc_get_io_buf - Get one IO buffer from free pool
22101 * @phba: The HBA for which this call is being executed.
22102 * @ndlp: pointer to lpfc nodelist data structure.
22103  * @hwqid: index of the HWQ to allocate from.
22104  * @expedite: 1 means this request is urgent.
22105  *
22106  * This routine gets one IO buffer from the free pool. If cfg_xri_rebalancing==1,
22107  * it removes an IO buffer from the multiXRI pools. Otherwise it removes an IO
22108  * buffer from the head of the @hdwq io_buf_list and returns it to the caller.
22109 *
22110 * Note: ndlp is only used on SCSI side for RRQ testing.
22111 * The caller should pass NULL for ndlp on NVME side.
22112 *
22113 * Return codes:
22114 * NULL - Error
22115 * Pointer to lpfc_io_buf - Success
22116 **/
22117 struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
22118 struct lpfc_nodelist *ndlp,
22119 u32 hwqid, int expedite)
22120 {
22121 struct lpfc_sli4_hdw_queue *qp;
22122 unsigned long iflag;
22123 struct lpfc_io_buf *lpfc_cmd;
22124
22125 qp = &phba->sli4_hba.hdwq[hwqid];
22126 lpfc_cmd = NULL;
22127 if (!qp) {
22128 lpfc_printf_log(phba, KERN_WARNING,
22129 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
22130 "5555 NULL qp for hwqid x%x\n", hwqid);
22131 return lpfc_cmd;
22132 }
22133
22134 if (phba->cfg_xri_rebalancing)
22135 lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
22136 phba, ndlp, hwqid, expedite);
22137 else {
22138 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
22139 qp, alloc_xri_get);
22140 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
22141 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
22142 if (!lpfc_cmd) {
22143 lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
22144 qp, alloc_xri_put);
22145 list_splice(&qp->lpfc_io_buf_list_put,
22146 &qp->lpfc_io_buf_list_get);
22147 qp->get_io_bufs += qp->put_io_bufs;
22148 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
22149 qp->put_io_bufs = 0;
22150 spin_unlock(&qp->io_buf_list_put_lock);
22151 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
22152 expedite)
22153 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
22154 }
22155 spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
22156 }
22157
22158 return lpfc_cmd;
22159 }
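
/*
 * Illustrative pairing (hypothetical call site): a buf obtained here must
 * eventually go back through lpfc_release_io_buf() so the XRI pools stay
 * balanced; ndlp is only needed on the SCSI side for RRQ testing, and
 * issue_io() is an assumed helper.
 *
 *	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, hwqid, 0);
 *	if (!lpfc_cmd)
 *		return -EBUSY;
 *	issue_io(lpfc_cmd);
 *	lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
 */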
22160
22161 /**
22162 * lpfc_read_object - Retrieve object data from HBA
22163 * @phba: The HBA for which this call is being executed.
22164 * @rdobject: Pathname of object data we want to read.
22165 * @datap: Pointer to where data will be copied to.
22166 * @datasz: size of data area
22167 *
22168 * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less.
22169 * The data will be truncated if datasz is not large enough.
22170  * Version 1 is not supported with the embedded mbox cmd, so we must use
22171  * version 0. Returns the actual number of bytes read from the object.
22172 */
22173 int
22174 lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
22175 uint32_t datasz)
22176 {
22177 struct lpfc_mbx_read_object *read_object;
22178 LPFC_MBOXQ_t *mbox;
22179 int rc, length, eof, j, byte_cnt = 0;
22180 uint32_t shdr_status, shdr_add_status;
22181 union lpfc_sli4_cfg_shdr *shdr;
22182 struct lpfc_dmabuf *pcmd;
22183 u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};
22184
22185 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
22186 if (!mbox)
22187 return -ENOMEM;
22188 length = (sizeof(struct lpfc_mbx_read_object) -
22189 sizeof(struct lpfc_sli4_cfg_mhdr));
22190 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
22191 LPFC_MBOX_OPCODE_READ_OBJECT,
22192 length, LPFC_SLI4_MBX_EMBED);
22193 read_object = &mbox->u.mqe.un.read_object;
22194 shdr = (union lpfc_sli4_cfg_shdr *)&read_object->header.cfg_shdr;
22195
22196 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0);
22197 bf_set(lpfc_mbx_rd_object_rlen, &read_object->u.request, datasz);
22198 read_object->u.request.rd_object_offset = 0;
22199 read_object->u.request.rd_object_cnt = 1;
22200
22201 memset((void *)read_object->u.request.rd_object_name, 0,
22202 LPFC_OBJ_NAME_SZ);
22203 scnprintf((char *)rd_object_name, sizeof(rd_object_name), rdobject);
22204 for (j = 0; j < strlen(rdobject); j++)
22205 read_object->u.request.rd_object_name[j] =
22206 cpu_to_le32(rd_object_name[j]);
22207
22208 pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
22209 if (pcmd)
22210 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
22211 if (!pcmd || !pcmd->virt) {
22212 kfree(pcmd);
22213 mempool_free(mbox, phba->mbox_mem_pool);
22214 return -ENOMEM;
22215 }
22216 memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE);
22217 read_object->u.request.rd_object_hbuf[0].pa_lo =
22218 putPaddrLow(pcmd->phys);
22219 read_object->u.request.rd_object_hbuf[0].pa_hi =
22220 putPaddrHigh(pcmd->phys);
22221 read_object->u.request.rd_object_hbuf[0].length = LPFC_BPL_SIZE;
22222
22223 mbox->vport = phba->pport;
22224 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
22225 mbox->ctx_ndlp = NULL;
22226
22227 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
22228 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
22229 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
22230
22231 if (shdr_status == STATUS_FAILED &&
22232 shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) {
22233 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
22234 "4674 No port cfg file in FW.\n");
22235 byte_cnt = -ENOENT;
22236 } else if (shdr_status || shdr_add_status || rc) {
22237 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
22238 "2625 READ_OBJECT mailbox failed with "
22239 "status x%x add_status x%x, mbx status x%x\n",
22240 shdr_status, shdr_add_status, rc);
22241 byte_cnt = -ENXIO;
22242 } else {
22243 /* Success */
22244 length = read_object->u.response.rd_object_actual_rlen;
22245 eof = bf_get(lpfc_mbx_rd_object_eof, &read_object->u.response);
22246 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT,
22247 "2626 READ_OBJECT Success len %d:%d, EOF %d\n",
22248 length, datasz, eof);
22249
22250 /* Detect the port config file exists but is empty */
22251 if (!length && eof) {
22252 byte_cnt = 0;
22253 goto exit;
22254 }
22255
22256 byte_cnt = length;
22257 lpfc_sli_pcimem_bcopy(pcmd->virt, datap, byte_cnt);
22258 }
22259
22260 exit:
22261 /* This is an embedded SLI4 mailbox with an external buffer allocated.
22262 * Free the pcmd and then cleanup with the correct routine.
22263 */
22264 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
22265 kfree(pcmd);
22266 lpfc_sli4_mbox_cmd_free(phba, mbox);
22267 return byte_cnt;
22268 }
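
/*
 * Illustrative usage (assumed object name): reading a small object into a
 * local buffer. The return value is the byte count actually read, or a
 * negative errno (-ENOENT when the object does not exist in FW). Objects
 * larger than LPFC_BPL_SIZE are truncated.
 *
 *	u32 data[128];
 *	int len;
 *
 *	len = lpfc_read_object(phba, "/some/object.cfg", data, sizeof(data));
 *	if (len < 0)
 *		return len;
 */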
22269
22270 /**
22271 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
22272 * @phba: The HBA for which this call is being executed.
22273 * @lpfc_buf: IO buf structure to append the SGL chunk
22274 *
22275 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
22276 * and will allocate an SGL chunk if the pool is empty.
22277 *
22278 * Return codes:
22279 * NULL - Error
22280 * Pointer to sli4_hybrid_sgl - Success
22281 **/
22282 struct sli4_hybrid_sgl *
22283 lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
22284 {
22285 struct sli4_hybrid_sgl *list_entry = NULL;
22286 struct sli4_hybrid_sgl *tmp = NULL;
22287 struct sli4_hybrid_sgl *allocated_sgl = NULL;
22288 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22289 struct list_head *buf_list = &hdwq->sgl_list;
22290 unsigned long iflags;
22291
22292 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22293
22294 if (likely(!list_empty(buf_list))) {
22295 /* break off 1 chunk from the sgl_list */
22296 list_for_each_entry_safe(list_entry, tmp,
22297 buf_list, list_node) {
22298 list_move_tail(&list_entry->list_node,
22299 &lpfc_buf->dma_sgl_xtra_list);
22300 break;
22301 }
22302 } else {
22303 /* allocate more */
22304 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22305 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
22306 cpu_to_node(hdwq->io_wq->chann));
22307 if (!tmp) {
22308 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22309 "8353 error kmalloc memory for HDWQ "
22310 "%d %s\n",
22311 lpfc_buf->hdwq_no, __func__);
22312 return NULL;
22313 }
22314
22315 tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
22316 GFP_ATOMIC, &tmp->dma_phys_sgl);
22317 if (!tmp->dma_sgl) {
22318 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22319 "8354 error pool_alloc memory for HDWQ "
22320 "%d %s\n",
22321 lpfc_buf->hdwq_no, __func__);
22322 kfree(tmp);
22323 return NULL;
22324 }
22325
22326 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22327 list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
22328 }
22329
22330 allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
22331 struct sli4_hybrid_sgl,
22332 list_node);
22333
22334 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22335
22336 return allocated_sgl;
22337 }
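
/*
 * Illustrative pairing (hypothetical call site): each chunk taken with
 * lpfc_get_sgl_per_hdwq() stays on the buf's dma_sgl_xtra_list until the
 * IO completes; lpfc_put_sgl_per_hdwq() then returns the whole list to
 * the hdwq pool (lpfc_release_io_buf() does this automatically).
 *
 *	struct sli4_hybrid_sgl *sgl;
 *
 *	sgl = lpfc_get_sgl_per_hdwq(phba, lpfc_buf);
 *	if (!sgl)
 *		return -ENOMEM;
 */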
22338
22339 /**
22340 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
22341 * @phba: The HBA for which this call is being executed.
22342 * @lpfc_buf: IO buf structure with the SGL chunk
22343 *
22344 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
22345 *
22346 * Return codes:
22347 * 0 - Success
22348 * -EINVAL - Error
22349 **/
22350 int
22351 lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
22352 {
22353 int rc = 0;
22354 struct sli4_hybrid_sgl *list_entry = NULL;
22355 struct sli4_hybrid_sgl *tmp = NULL;
22356 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22357 struct list_head *buf_list = &hdwq->sgl_list;
22358 unsigned long iflags;
22359
22360 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22361
22362 if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
22363 list_for_each_entry_safe(list_entry, tmp,
22364 &lpfc_buf->dma_sgl_xtra_list,
22365 list_node) {
22366 list_move_tail(&list_entry->list_node,
22367 buf_list);
22368 }
22369 } else {
22370 rc = -EINVAL;
22371 }
22372
22373 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22374 return rc;
22375 }
22376
22377 /**
22378 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
22379 * @phba: phba object
22380 * @hdwq: hdwq to cleanup sgl buff resources on
22381 *
22382 * This routine frees all SGL chunks of hdwq SGL chunk pool.
22383 *
22384 * Return codes:
22385 * None
22386 **/
22387 void
22388 lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
22389 struct lpfc_sli4_hdw_queue *hdwq)
22390 {
22391 struct list_head *buf_list = &hdwq->sgl_list;
22392 struct sli4_hybrid_sgl *list_entry = NULL;
22393 struct sli4_hybrid_sgl *tmp = NULL;
22394 unsigned long iflags;
22395
22396 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22397
22398 /* Free sgl pool */
22399 list_for_each_entry_safe(list_entry, tmp,
22400 buf_list, list_node) {
22401 list_del(&list_entry->list_node);
22402 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
22403 list_entry->dma_sgl,
22404 list_entry->dma_phys_sgl);
22405 kfree(list_entry);
22406 }
22407
22408 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22409 }
22410
22411 /**
22412 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
22413 * @phba: The HBA for which this call is being executed.
22414 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
22415 *
22416 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
22417  * and will allocate a CMD/RSP buffer if the pool is empty.
22418 *
22419 * Return codes:
22420 * NULL - Error
22421 * Pointer to fcp_cmd_rsp_buf - Success
22422 **/
22423 struct fcp_cmd_rsp_buf *
22424 lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22425 struct lpfc_io_buf *lpfc_buf)
22426 {
22427 struct fcp_cmd_rsp_buf *list_entry = NULL;
22428 struct fcp_cmd_rsp_buf *tmp = NULL;
22429 struct fcp_cmd_rsp_buf *allocated_buf = NULL;
22430 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22431 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22432 unsigned long iflags;
22433
22434 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22435
22436 if (likely(!list_empty(buf_list))) {
22437 /* break off 1 chunk from the list */
22438 list_for_each_entry_safe(list_entry, tmp,
22439 buf_list,
22440 list_node) {
22441 list_move_tail(&list_entry->list_node,
22442 &lpfc_buf->dma_cmd_rsp_list);
22443 break;
22444 }
22445 } else {
22446 /* allocate more */
22447 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22448 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
22449 cpu_to_node(hdwq->io_wq->chann));
22450 if (!tmp) {
22451 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22452 "8355 error kmalloc memory for HDWQ "
22453 "%d %s\n",
22454 lpfc_buf->hdwq_no, __func__);
22455 return NULL;
22456 }
22457
22458 tmp->fcp_cmnd = dma_pool_zalloc(phba->lpfc_cmd_rsp_buf_pool,
22459 GFP_ATOMIC,
22460 &tmp->fcp_cmd_rsp_dma_handle);
22461
22462 if (!tmp->fcp_cmnd) {
22463 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22464 "8356 error pool_alloc memory for HDWQ "
22465 "%d %s\n",
22466 lpfc_buf->hdwq_no, __func__);
22467 kfree(tmp);
22468 return NULL;
22469 }
22470
22471 tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
22472 sizeof(struct fcp_cmnd));
22473
22474 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22475 list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
22476 }
22477
22478 allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
22479 struct fcp_cmd_rsp_buf,
22480 list_node);
22481
22482 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22483
22484 return allocated_buf;
22485 }
22486
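/*
 * Illustrative sketch (not part of the driver, hence compiled out):
 * each fcp_cmd_rsp_buf is a single DMA-pool allocation holding a
 * struct fcp_cmnd immediately followed by a struct fcp_rsp, so one
 * DMA handle (fcp_cmd_rsp_dma_handle) covers both. A hypothetical
 * caller pairing the get and put routines:
 */
#if 0
static int example_use_cmd_rsp_buf(struct lpfc_hba *phba,
				   struct lpfc_io_buf *lpfc_buf)
{
	struct fcp_cmd_rsp_buf *buf;

	buf = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_buf);
	if (!buf)
		return -ENOMEM;

	/* buf->fcp_rsp sits right after the command in the same mapping:
	 * buf->fcp_rsp == (struct fcp_rsp *)(buf->fcp_cmnd + 1)
	 */

	/* ... fill buf->fcp_cmnd and post a WQE that references
	 * buf->fcp_cmd_rsp_dma_handle ...
	 */

	return lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_buf);
}
#endif
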
/**
 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure with the CMD/RSP buf
 *
 * This routine returns the CMD/RSP buffers held on the IO buf's
 * dma_cmd_rsp_list to the hdwq's CMD/RSP buffer pool.
 *
 * Return codes:
 * 0 - Success
 * -EINVAL - Error
 **/
int
lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			      struct lpfc_io_buf *lpfc_buf)
{
	int rc = 0;
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
		list_for_each_entry_safe(list_entry, tmp,
					 &lpfc_buf->dma_cmd_rsp_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       buf_list);
		}
	} else {
		rc = -EINVAL;
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
	return rc;
}

/**
 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to clean up CMD/RSP buffer resources on
 *
 * This routine frees all CMD/RSP buffers of the hdwq's CMD/RSP buffer pool.
 *
 * Return codes:
 * None
 **/
void
lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			       struct lpfc_sli4_hdw_queue *hdwq)
{
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	/* Free cmd_rsp buf pool */
	list_for_each_entry_safe(list_entry, tmp,
				 buf_list,
				 list_node) {
		list_del(&list_entry->list_node);
		dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
			      list_entry->fcp_cmnd,
			      list_entry->fcp_cmd_rsp_dma_handle);
		kfree(list_entry);
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}

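/*
 * Illustrative sketch (not part of the driver, hence compiled out):
 * draining both per-hdwq pools during adapter teardown. The loop
 * bound phba->cfg_hdw_queue and the phba->sli4_hba.hdwq array are
 * existing driver fields; the helper name itself is hypothetical.
 */
#if 0
static void example_drain_hdwq_pools(struct lpfc_hba *phba)
{
	int idx;

	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		struct lpfc_sli4_hdw_queue *hdwq = &phba->sli4_hba.hdwq[idx];

		lpfc_free_sgl_per_hdwq(phba, hdwq);
		lpfc_free_cmd_rsp_buf_per_hdwq(phba, hdwq);
	}
}
#endif
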
/**
 * lpfc_sli_prep_wqe - Prepare WQE for the command to be posted
 * @phba: phba object
 * @job: job entry of the command to be posted.
 *
 * Fill in the common WQE fields for the command to be posted.
 *
 * Return codes:
 * None
 **/
void
lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
{
	u8 cmnd;
	u32 *pcmd;
	u32 if_type = 0;
	u32 fip, abort_tag;
	struct lpfc_nodelist *ndlp = NULL;
	union lpfc_wqe128 *wqe = &job->wqe;
	u8 command_type = ELS_COMMAND_NON_FIP;

	fip = phba->hba_flag & HBA_FIP_SUPPORT;
	/* The FCP commands set the command type themselves */
	if (job->cmd_flag & LPFC_IO_FCP)
		command_type = FCP_COMMAND;
	else if (fip && (job->cmd_flag & LPFC_FIP_ELS_ID_MASK))
		command_type = ELS_COMMAND_FIP;
	else
		command_type = ELS_COMMAND_NON_FIP;

	abort_tag = job->iotag;
	cmnd = bf_get(wqe_cmnd, &wqe->els_req.wqe_com);

	switch (cmnd) {
	case CMD_ELS_REQUEST64_WQE:
		ndlp = job->ndlp;

		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
			pcmd = (u32 *)job->cmd_dmabuf->virt;
			if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
				     *pcmd == ELS_CMD_SCR ||
				     *pcmd == ELS_CMD_RDF ||
				     *pcmd == ELS_CMD_EDC ||
				     *pcmd == ELS_CMD_RSCN_XMT ||
				     *pcmd == ELS_CMD_FDISC ||
				     *pcmd == ELS_CMD_LOGO ||
				     *pcmd == ELS_CMD_QFPA ||
				     *pcmd == ELS_CMD_UVEM ||
				     *pcmd == ELS_CMD_PLOGI)) {
				bf_set(els_req64_sp, &wqe->els_req, 1);
				bf_set(els_req64_sid, &wqe->els_req,
				       job->vport->fc_myDID);

				if ((*pcmd == ELS_CMD_FLOGI) &&
				    !(phba->fc_topology ==
				      LPFC_TOPOLOGY_LOOP))
					bf_set(els_req64_sid, &wqe->els_req, 0);

				bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
				       phba->vpi_ids[job->vport->vpi]);
			} else if (pcmd) {
				bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
				       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
			}
		}

		bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);

		bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
		break;
	case CMD_XMIT_ELS_RSP64_WQE:
		ndlp = job->ndlp;

		/* word4 */
		wqe->xmit_els_rsp.word4 = 0;

		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
			if (job->vport->fc_flag & FC_PT2PT) {
				bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
				bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
				       job->vport->fc_myDID);
				if (job->vport->fc_myDID == Fabric_DID) {
					bf_set(wqe_els_did,
					       &wqe->xmit_els_rsp.wqe_dest, 0);
				}
			}
		}

		bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);

		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
			bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
			       job->vport->fc_myDID);
			bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
		}

		if (phba->sli_rev == LPFC_SLI_REV4) {
			bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
			       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);

			if (bf_get(wqe_ct, &wqe->xmit_els_rsp.wqe_com))
				bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
				       phba->vpi_ids[job->vport->vpi]);
		}
		command_type = OTHER_COMMAND;
		break;
	case CMD_GEN_REQUEST64_WQE:
		/* Word 10 */
		bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_SEQUENCE64_WQE:
		if (phba->link_flag & LS_LOOPBACK_MODE)
			bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);

		wqe->xmit_sequence.rsvd3 = 0;
		bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
		bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_LENLOC_WORD12);
		bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_BLS_RSP64_WQE:
		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
		bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
		       phba->vpi_ids[phba->pport->vpi]);
		bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		/* Overwrite the pre-set command type with OTHER_COMMAND */
		command_type = OTHER_COMMAND;
		break;
	case CMD_FCP_ICMND64_WQE:	/* task mgmt commands */
	case CMD_ABORT_XRI_WQE:		/* abort iotag */
	case CMD_SEND_FRAME:		/* mds loopback */
		/* cases already formatted for SLI4 WQE - no changes necessary */
		return;
	default:
		dump_stack();
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6207 Invalid command 0x%x\n",
				cmnd);
		break;
	}

	wqe->generic.wqe_com.abort_tag = abort_tag;
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, job->iotag);
	bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
}

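/*
 * Illustrative sketch (not part of the driver, hence compiled out):
 * lpfc_sli_prep_wqe() runs after the command-specific WQE words have
 * been built and before the iocb is queued, so the common words
 * (request tag, abort tag, command type, CQ id) are filled in last.
 * A hypothetical submission path:
 */
#if 0
static int example_submit_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
{
	/* ... command-specific words of job->wqe already built ... */

	/* Fill in the common WQE words from the job */
	lpfc_sli_prep_wqe(phba, job);

	/* Hand the request to the SLI layer for posting to the WQ */
	return lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, job, 0);
}
#endif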