xref: /openbmc/linux/drivers/scsi/lpfc/lpfc_sli.c (revision bc5aa3a0)
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21 
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/interrupt.h>
25 #include <linux/delay.h>
26 #include <linux/slab.h>
27 #include <linux/lockdep.h>
28 
29 #include <scsi/scsi.h>
30 #include <scsi/scsi_cmnd.h>
31 #include <scsi/scsi_device.h>
32 #include <scsi/scsi_host.h>
33 #include <scsi/scsi_transport_fc.h>
34 #include <scsi/fc/fc_fs.h>
35 #include <linux/aer.h>
36 
37 #include "lpfc_hw4.h"
38 #include "lpfc_hw.h"
39 #include "lpfc_sli.h"
40 #include "lpfc_sli4.h"
41 #include "lpfc_nl.h"
42 #include "lpfc_disc.h"
43 #include "lpfc_scsi.h"
44 #include "lpfc.h"
45 #include "lpfc_crtn.h"
46 #include "lpfc_logmsg.h"
47 #include "lpfc_compat.h"
48 #include "lpfc_debugfs.h"
49 #include "lpfc_vport.h"
50 
51 /* There are only four IOCB completion types. */
52 typedef enum _lpfc_iocb_type {
53 	LPFC_UNKNOWN_IOCB,
54 	LPFC_UNSOL_IOCB,
55 	LPFC_SOL_IOCB,
56 	LPFC_ABORT_IOCB
57 } lpfc_iocb_type;
58 
59 
60 /* Provide function prototypes local to this module. */
61 static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
62 				  uint32_t);
63 static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
64 			      uint8_t *, uint32_t *);
65 static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
66 							 struct lpfc_iocbq *);
67 static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
68 				      struct hbq_dmabuf *);
69 static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
70 				    struct lpfc_cqe *);
71 static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
72 				       int);
73 static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
74 			uint32_t);
75 static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
76 static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
77 
78 static IOCB_t *
79 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
80 {
81 	return &iocbq->iocb;
82 }
83 
84 /**
85  * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
86  * @q: The Work Queue to operate on.
87  * @wqe: The work Queue Entry to put on the Work queue.
88  *
89  * This routine will copy the contents of @wqe to the next available entry on
90  * the @q. This function will then ring the Work Queue Doorbell to signal the
91  * HBA to start processing the Work Queue Entry. This function returns 0 if
92  * successful. If no entries are available on @q then this function will return
93  * -ENOMEM.
94  * The caller is expected to hold the hbalock when calling this routine.
95  **/
96 static uint32_t
97 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
98 {
99 	union lpfc_wqe *temp_wqe;
100 	struct lpfc_register doorbell;
101 	uint32_t host_index;
102 	uint32_t idx;
103 
104 	/* sanity check on queue memory */
105 	if (unlikely(!q))
106 		return -ENOMEM;
107 	temp_wqe = q->qe[q->host_index].wqe;
108 
109 	/* If the HBA has not yet consumed the next entry then the queue is full */
110 	idx = ((q->host_index + 1) % q->entry_count);
111 	if (idx == q->hba_index) {
112 		q->WQ_overflow++;
113 		return -ENOMEM;
114 	}
115 	q->WQ_posted++;
116 	/* set consumption flag every once in a while */
117 	if (!((q->host_index + 1) % q->entry_repost))
118 		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
119 	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
120 		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
121 	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
122 
123 	/* Update the host index before invoking device */
124 	host_index = q->host_index;
125 
126 	q->host_index = idx;
127 
128 	/* Ring Doorbell */
129 	doorbell.word0 = 0;
130 	if (q->db_format == LPFC_DB_LIST_FORMAT) {
131 		bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
132 		bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
133 		bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
134 	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
135 		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
136 		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
137 	} else {
138 		return -EINVAL;
139 	}
140 	writel(doorbell.word0, q->db_regaddr);
141 
142 	return 0;
143 }
144 
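/*
 * Illustrative sketch (not part of the driver): lpfc_sli4_wq_put() above uses
 * the classic circular-queue fullness test -- the queue is treated as full
 * when advancing the host index would land on the entry the HBA has not yet
 * consumed. The helper below is hypothetical and only isolates that check.
 */
static inline bool lpfc_sli4_wq_full_example(struct lpfc_queue *q)
{
	/* one slot is always sacrificed to distinguish full from empty */
	return ((q->host_index + 1) % q->entry_count) == q->hba_index;
}
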
145 /**
146  * lpfc_sli4_wq_release - Updates internal hba index for WQ
147  * @q: The Work Queue to operate on.
148  * @index: The index to advance the hba index to.
149  *
150  * This routine will update the HBA index of a queue to reflect consumption of
151  * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
152  * an entry the host calls this function to update the queue's internal
153  * pointers. This routine returns the number of entries that were consumed by
154  * the HBA.
155  **/
156 static uint32_t
157 lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
158 {
159 	uint32_t released = 0;
160 
161 	/* sanity check on queue memory */
162 	if (unlikely(!q))
163 		return 0;
164 
165 	if (q->hba_index == index)
166 		return 0;
167 	do {
168 		q->hba_index = ((q->hba_index + 1) % q->entry_count);
169 		released++;
170 	} while (q->hba_index != index);
171 	return released;
172 }
173 
174 /**
175  * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
176  * @q: The Mailbox Queue to operate on.
177  * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
178  *
179  * This routine will copy the contents of @mqe to the next available entry on
180  * the @q. This function will then ring the Mailbox Queue Doorbell to signal the
181  * HBA to start processing the Mailbox Queue Entry. This function returns 0 if
182  * successful. If no entries are available on @q then this function will return
183  * -ENOMEM.
184  * The caller is expected to hold the hbalock when calling this routine.
185  **/
186 static uint32_t
187 lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
188 {
189 	struct lpfc_mqe *temp_mqe;
190 	struct lpfc_register doorbell;
191 
192 	/* sanity check on queue memory */
193 	if (unlikely(!q))
194 		return -ENOMEM;
195 	temp_mqe = q->qe[q->host_index].mqe;
196 
197 	/* If the host has not yet processed the next entry then we are done */
198 	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
199 		return -ENOMEM;
200 	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
201 	/* Save off the mailbox pointer for completion */
202 	q->phba->mbox = (MAILBOX_t *)temp_mqe;
203 
204 	/* Update the host index before invoking device */
205 	q->host_index = ((q->host_index + 1) % q->entry_count);
206 
207 	/* Ring Doorbell */
208 	doorbell.word0 = 0;
209 	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
210 	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
211 	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
212 	return 0;
213 }
214 
215 /**
216  * lpfc_sli4_mq_release - Updates internal hba index for MQ
217  * @q: The Mailbox Queue to operate on.
218  *
219  * This routine will update the HBA index of a queue to reflect consumption of
220  * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
221  * an entry the host calls this function to update the queue's internal
222  * pointers. This routine returns the number of entries that were consumed by
223  * the HBA.
224  **/
225 static uint32_t
226 lpfc_sli4_mq_release(struct lpfc_queue *q)
227 {
228 	/* sanity check on queue memory */
229 	if (unlikely(!q))
230 		return 0;
231 
232 	/* Clear the mailbox pointer for completion */
233 	q->phba->mbox = NULL;
234 	q->hba_index = ((q->hba_index + 1) % q->entry_count);
235 	return 1;
236 }
237 
238 /**
239  * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
240  * @q: The Event Queue to get the first valid EQE from
241  *
242  * This routine will get the first valid Event Queue Entry from @q, update
243  * the queue's internal hba index, and return the EQE. If no valid EQEs are in
244  * the Queue (no more work to do), or the Queue is full of EQEs that have been
245  * processed but not yet popped back to the HBA, this routine will return NULL.
246  **/
247 static struct lpfc_eqe *
248 lpfc_sli4_eq_get(struct lpfc_queue *q)
249 {
250 	struct lpfc_eqe *eqe;
251 	uint32_t idx;
252 
253 	/* sanity check on queue memory */
254 	if (unlikely(!q))
255 		return NULL;
256 	eqe = q->qe[q->hba_index].eqe;
257 
258 	/* If the next EQE is not valid then we are done */
259 	if (!bf_get_le32(lpfc_eqe_valid, eqe))
260 		return NULL;
261 	/* If the host has not yet processed the next entry then we are done */
262 	idx = ((q->hba_index + 1) % q->entry_count);
263 	if (idx == q->host_index)
264 		return NULL;
265 
266 	q->hba_index = idx;
267 
268 	/*
269 	 * insert barrier for instruction interlock : data from the hardware
270 	 * must have the valid bit checked before it can be copied and acted
271 	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
272 	 * instructions allowing action on content before valid bit checked,
273 	 * add barrier here as well. May not be needed as "content" is a
274 	 * single 32-bit entity here (vs multi word structure for cq's).
275 	 */
276 	mb();
277 	return eqe;
278 }
279 
280 /**
281  * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
282  * @q: The Event Queue to disable interrupts
283  *
284  **/
285 static inline void
286 lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
287 {
288 	struct lpfc_register doorbell;
289 
290 	doorbell.word0 = 0;
291 	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
292 	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
293 	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
294 		(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
295 	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
296 	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
297 }
298 
299 /**
300  * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
301  * @q: The Event Queue that the host has completed processing for.
302  * @arm: Indicates whether the host wants to arm this EQ.
303  *
304  * This routine will mark all Event Queue Entries on @q, from the last
305  * known completed entry to the last entry that was processed, as completed
306  * by clearing the valid bit for each event queue entry. Then it will
307  * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
308  * The internal host index in the @q will be updated by this routine to indicate
309  * that the host has finished processing the entries. The @arm parameter
310  * indicates that the queue should be rearmed when ringing the doorbell.
311  *
312  * This function will return the number of EQEs that were popped.
313  **/
314 uint32_t
315 lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
316 {
317 	uint32_t released = 0;
318 	struct lpfc_eqe *temp_eqe;
319 	struct lpfc_register doorbell;
320 
321 	/* sanity check on queue memory */
322 	if (unlikely(!q))
323 		return 0;
324 
325 	/* while there are valid entries */
326 	while (q->hba_index != q->host_index) {
327 		temp_eqe = q->qe[q->host_index].eqe;
328 		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
329 		released++;
330 		q->host_index = ((q->host_index + 1) % q->entry_count);
331 	}
332 	if (unlikely(released == 0 && !arm))
333 		return 0;
334 
335 	/* ring doorbell for number popped */
336 	doorbell.word0 = 0;
337 	if (arm) {
338 		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
339 		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
340 	}
341 	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
342 	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
343 	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
344 			(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
345 	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
346 	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
347 	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
348 	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
349 		readl(q->phba->sli4_hba.EQCQDBregaddr);
350 	return released;
351 }
352 
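/*
 * Illustrative sketch (not part of the driver): a typical interrupt-time
 * consumer pairs lpfc_sli4_eq_get() with lpfc_sli4_eq_release(). The valid
 * EQEs are drained first, then a single doorbell write clears their valid
 * bits and (optionally) re-arms the EQ. The helper below is hypothetical and
 * only demonstrates the calling pattern.
 */
static inline void lpfc_sli4_eq_drain_example(struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe;

	/* consume every EQE that has its valid bit set */
	while ((eqe = lpfc_sli4_eq_get(eq)) != NULL)
		; /* a real handler would dispatch on the EQE contents here */

	/* clear the valid bits, ring the doorbell and re-arm the EQ */
	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
}
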
353 /**
354  * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
355  * @q: The Completion Queue to get the first valid CQE from
356  *
357  * This routine will get the first valid Completion Queue Entry from @q, update
358  * the queue's internal hba index, and return the CQE. If no valid CQEs are in
359  * the Queue (no more work to do), or the Queue is full of CQEs that have been
360  * processed but not yet popped back to the HBA, this routine will return NULL.
361  **/
362 static struct lpfc_cqe *
363 lpfc_sli4_cq_get(struct lpfc_queue *q)
364 {
365 	struct lpfc_cqe *cqe;
366 	uint32_t idx;
367 
368 	/* sanity check on queue memory */
369 	if (unlikely(!q))
370 		return NULL;
371 
372 	/* If the next CQE is not valid then we are done */
373 	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
374 		return NULL;
375 	/* If the host has not yet processed the next entry then we are done */
376 	idx = ((q->hba_index + 1) % q->entry_count);
377 	if (idx == q->host_index)
378 		return NULL;
379 
380 	cqe = q->qe[q->hba_index].cqe;
381 	q->hba_index = idx;
382 
383 	/*
384 	 * insert barrier for instruction interlock : data from the hardware
385 	 * must have the valid bit checked before it can be copied and acted
386 	 * upon. Speculative instructions were allowing a bcopy at the start
387 	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
388 	 * after our return, to copy data before the valid bit check above
389 	 * was done. As such, some of the copied data was stale. The barrier
390 	 * ensures the check is before any data is copied.
391 	 */
392 	mb();
393 	return cqe;
394 }
395 
396 /**
397  * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
398  * @q: The Completion Queue that the host has completed processing for.
399  * @arm: Indicates whether the host wants to arm this CQ.
400  *
401  * This routine will mark all Completion queue entries on @q, from the last
402  * known completed entry to the last entry that was processed, as completed
403  * by clearing the valid bit for each completion queue entry. Then it will
404  * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
405  * The internal host index in the @q will be updated by this routine to indicate
406  * that the host has finished processing the entries. The @arm parameter
407  * indicates that the queue should be rearmed when ringing the doorbell.
408  *
409  * This function will return the number of CQEs that were released.
410  **/
411 uint32_t
412 lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
413 {
414 	uint32_t released = 0;
415 	struct lpfc_cqe *temp_qe;
416 	struct lpfc_register doorbell;
417 
418 	/* sanity check on queue memory */
419 	if (unlikely(!q))
420 		return 0;
421 	/* while there are valid entries */
422 	while (q->hba_index != q->host_index) {
423 		temp_qe = q->qe[q->host_index].cqe;
424 		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
425 		released++;
426 		q->host_index = ((q->host_index + 1) % q->entry_count);
427 	}
428 	if (unlikely(released == 0 && !arm))
429 		return 0;
430 
431 	/* ring doorbell for number popped */
432 	doorbell.word0 = 0;
433 	if (arm)
434 		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
435 	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
436 	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
437 	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
438 			(q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
439 	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
440 	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
441 	return released;
442 }
443 
444 /**
445  * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
446  * @hq: The Header Receive Queue to operate on.
447  * @dq: The Data Receive Queue to operate on.
448  * @hrqe: The header receive queue entry to put on the header receive queue.
449  * @drqe: The data receive queue entry to put on the data receive queue.
450  *
451  * This routine copies the contents of @hrqe and @drqe to the next available
452  * entries on @hq and @dq, then rings the Receive Queue Doorbell to signal the
453  * HBA. It returns the put index on success, or a negative error code on failure.
454  * The caller is expected to hold the hbalock when calling this routine.
455  **/
456 static int
457 lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
458 		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
459 {
460 	struct lpfc_rqe *temp_hrqe;
461 	struct lpfc_rqe *temp_drqe;
462 	struct lpfc_register doorbell;
463 	int put_index;
464 
465 	/* sanity check on queue memory */
466 	if (unlikely(!hq) || unlikely(!dq))
467 		return -ENOMEM;
468 	put_index = hq->host_index;
469 	temp_hrqe = hq->qe[hq->host_index].rqe;
470 	temp_drqe = dq->qe[dq->host_index].rqe;
471 
472 	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
473 		return -EINVAL;
474 	if (hq->host_index != dq->host_index)
475 		return -EINVAL;
476 	/* If the host has not yet processed the next entry then we are done */
477 	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
478 		return -EBUSY;
479 	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
480 	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
481 
482 	/* Update the host index to point to the next slot */
483 	hq->host_index = ((hq->host_index + 1) % hq->entry_count);
484 	dq->host_index = ((dq->host_index + 1) % dq->entry_count);
485 
486 	/* Ring The Header Receive Queue Doorbell */
487 	if (!(hq->host_index % hq->entry_repost)) {
488 		doorbell.word0 = 0;
489 		if (hq->db_format == LPFC_DB_RING_FORMAT) {
490 			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
491 			       hq->entry_repost);
492 			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
493 		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
494 			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
495 			       hq->entry_repost);
496 			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
497 			       hq->host_index);
498 			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
499 		} else {
500 			return -EINVAL;
501 		}
502 		writel(doorbell.word0, hq->db_regaddr);
503 	}
504 	return put_index;
505 }
506 
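/*
 * Illustrative sketch (not part of the driver): header and data RQEs are
 * always posted as a pair, and lpfc_sli4_rq_put() above enforces that @hq and
 * @dq stay in lock-step. A caller would typically build both entries from the
 * DMA addresses of a header/data buffer pair, with the hbalock held, before
 * posting them. The helper below is hypothetical; the field and macro names
 * (putPaddrLow/putPaddrHigh, struct hbq_dmabuf, hdr_rq/dat_rq) are assumed to
 * match what this driver uses elsewhere.
 */
static inline int lpfc_sli4_post_rq_pair_example(struct lpfc_hba *phba,
						 struct hbq_dmabuf *buf)
{
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;

	/* point the header RQE at the header buffer ... */
	hrqe.address_lo = putPaddrLow(buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(buf->hbuf.phys);
	/* ... and the data RQE at the data buffer */
	drqe.address_lo = putPaddrLow(buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(buf->dbuf.phys);

	/* post both entries; a negative return means nothing was consumed */
	return lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
				&hrqe, &drqe);
}
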
507 /**
508  * lpfc_sli4_rq_release - Updates internal hba index for RQ
509  * @hq: The Header Receive Queue to operate on.
510  * @dq: The Data Receive Queue to operate on.
 *
511  * This routine will update the HBA index of a queue to reflect consumption of
512  * one Receive Queue Entry by the HBA. When the HBA indicates that it has
513  * consumed an entry the host calls this function to update the queue's
514  * internal pointers. This routine returns the number of entries that were
515  * consumed by the HBA.
516  **/
517 static uint32_t
518 lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
519 {
520 	/* sanity check on queue memory */
521 	if (unlikely(!hq) || unlikely(!dq))
522 		return 0;
523 
524 	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
525 		return 0;
526 	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
527 	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
528 	return 1;
529 }
530 
531 /**
532  * lpfc_cmd_iocb - Get next command iocb entry in the ring
533  * @phba: Pointer to HBA context object.
534  * @pring: Pointer to driver SLI ring object.
535  *
536  * This function returns pointer to next command iocb entry
537  * in the command ring. The caller must hold hbalock to prevent
538  * other threads from consuming the next command iocb.
539  * SLI-2/SLI-3 provide different sized iocbs.
540  **/
541 static inline IOCB_t *
542 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
543 {
544 	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
545 			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
546 }
547 
548 /**
549  * lpfc_resp_iocb - Get next response iocb entry in the ring
550  * @phba: Pointer to HBA context object.
551  * @pring: Pointer to driver SLI ring object.
552  *
553  * This function returns pointer to next response iocb entry
554  * in the response ring. The caller must hold hbalock to make sure
555  * that no other thread consumes the next response iocb.
556  * SLI-2/SLI-3 provide different sized iocbs.
557  **/
558 static inline IOCB_t *
559 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
560 {
561 	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
562 			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
563 }
564 
565 /**
566  * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
567  * @phba: Pointer to HBA context object.
568  *
569  * This function is called with hbalock held. This function
570  * allocates a new driver iocb object from the iocb pool. If the
571  * allocation is successful, it returns pointer to the newly
572  * allocated iocb object else it returns NULL.
573  **/
574 struct lpfc_iocbq *
575 __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
576 {
577 	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
578 	struct lpfc_iocbq * iocbq = NULL;
579 
580 	lockdep_assert_held(&phba->hbalock);
581 
582 	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
583 	if (iocbq)
584 		phba->iocb_cnt++;
585 	if (phba->iocb_cnt > phba->iocb_max)
586 		phba->iocb_max = phba->iocb_cnt;
587 	return iocbq;
588 }
589 
590 /**
591  * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
592  * @phba: Pointer to HBA context object.
593  * @xritag: XRI value.
594  *
595  * This function clears the sglq pointer from the array of active
596  * sglq's. The xritag that is passed in is used to index into the
597  * array. Before the xritag can be used it needs to be adjusted
598  * by subtracting the xribase.
599  *
600  * Returns sglq pointer = success, NULL = Failure.
601  **/
602 static struct lpfc_sglq *
603 __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
604 {
605 	struct lpfc_sglq *sglq;
606 
607 	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
608 	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
609 	return sglq;
610 }
611 
612 /**
613  * __lpfc_get_active_sglq - Get the active sglq for this XRI.
614  * @phba: Pointer to HBA context object.
615  * @xritag: XRI value.
616  *
617  * This function returns the sglq pointer from the array of active
618  * sglq's. The xritag that is passed in is used to index into the
619  * array. Before the xritag can be used it needs to be adjusted
620  * by subtracting the xribase.
621  *
622  * Returns sglq pointer = success, NULL = Failure.
623  **/
624 struct lpfc_sglq *
625 __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
626 {
627 	struct lpfc_sglq *sglq;
628 
629 	sglq =  phba->sli4_hba.lpfc_sglq_active_list[xritag];
630 	return sglq;
631 }
632 
633 /**
634  * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
635  * @phba: Pointer to HBA context object.
636  * @xritag: xri used in this exchange.
637  * @rrq: The RRQ to be cleared.
638  *
639  **/
640 void
641 lpfc_clr_rrq_active(struct lpfc_hba *phba,
642 		    uint16_t xritag,
643 		    struct lpfc_node_rrq *rrq)
644 {
645 	struct lpfc_nodelist *ndlp = NULL;
646 
647 	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
648 		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
649 
650 	/* The target DID could have been swapped (cable swap)
651 	 * we should use the ndlp from the findnode if it is
652 	 * available.
653 	 */
654 	if ((!ndlp) && rrq->ndlp)
655 		ndlp = rrq->ndlp;
656 
657 	if (!ndlp)
658 		goto out;
659 
660 	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
661 		rrq->send_rrq = 0;
662 		rrq->xritag = 0;
663 		rrq->rrq_stop_time = 0;
664 	}
665 out:
666 	mempool_free(rrq, phba->rrq_pool);
667 }
668 
669 /**
670  * lpfc_handle_rrq_active - Checks if the RRQ has waited RATOV.
671  * @phba: Pointer to HBA context object.
672  *
673  * This function takes the hbalock internally. It checks whether
674  * stop_time (ratov from setting the rrq active) has been
675  * reached; if it has and the send_rrq flag is set then
676  * it will call lpfc_send_rrq. If the send_rrq flag is not set
677  * then it will just call the routine to clear the rrq and
678  * free the rrq resource.
679  * The timer is set to the next rrq that is going to expire before
680  * leaving the routine.
681  *
682  **/
683 void
684 lpfc_handle_rrq_active(struct lpfc_hba *phba)
685 {
686 	struct lpfc_node_rrq *rrq;
687 	struct lpfc_node_rrq *nextrrq;
688 	unsigned long next_time;
689 	unsigned long iflags;
690 	LIST_HEAD(send_rrq);
691 
692 	spin_lock_irqsave(&phba->hbalock, iflags);
693 	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
694 	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
695 	list_for_each_entry_safe(rrq, nextrrq,
696 				 &phba->active_rrq_list, list) {
697 		if (time_after(jiffies, rrq->rrq_stop_time))
698 			list_move(&rrq->list, &send_rrq);
699 		else if (time_before(rrq->rrq_stop_time, next_time))
700 			next_time = rrq->rrq_stop_time;
701 	}
702 	spin_unlock_irqrestore(&phba->hbalock, iflags);
703 	if ((!list_empty(&phba->active_rrq_list)) &&
704 	    (!(phba->pport->load_flag & FC_UNLOADING)))
705 		mod_timer(&phba->rrq_tmr, next_time);
706 	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
707 		list_del(&rrq->list);
708 		if (!rrq->send_rrq)
709 			/* this call will free the rrq */
710 			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
711 		else if (lpfc_send_rrq(phba, rrq)) {
712 			/* if we send the rrq then the completion handler
713 			*  will clear the bit in the xribitmap.
714 			*/
715 			lpfc_clr_rrq_active(phba, rrq->xritag,
716 					    rrq);
717 		}
718 	}
719 }
720 
721 /**
722  * lpfc_get_active_rrq - Get the active RRQ for this exchange.
723  * @vport: Pointer to vport context object.
724  * @xri: The xri used in the exchange.
725  * @did: The target's DID for this exchange.
726  *
727  * returns NULL = rrq not found in the phba->active_rrq_list.
728  *         rrq = rrq for this xri and target.
729  **/
730 struct lpfc_node_rrq *
731 lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
732 {
733 	struct lpfc_hba *phba = vport->phba;
734 	struct lpfc_node_rrq *rrq;
735 	struct lpfc_node_rrq *nextrrq;
736 	unsigned long iflags;
737 
738 	if (phba->sli_rev != LPFC_SLI_REV4)
739 		return NULL;
740 	spin_lock_irqsave(&phba->hbalock, iflags);
741 	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
742 		if (rrq->vport == vport && rrq->xritag == xri &&
743 				rrq->nlp_DID == did){
744 			list_del(&rrq->list);
745 			spin_unlock_irqrestore(&phba->hbalock, iflags);
746 			return rrq;
747 		}
748 	}
749 	spin_unlock_irqrestore(&phba->hbalock, iflags);
750 	return NULL;
751 }
752 
753 /**
754  * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
755  * @vport: Pointer to vport context object.
756  * @ndlp: Pointer to the lpfc_nodelist structure.
757  * If ndlp is NULL, remove all active RRQs for this vport from the
758  * phba->active_rrq_list and clear the rrq.
759  * If ndlp is not NULL then only remove RRQs for this vport and this ndlp.
760  **/
761 void
762 lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
763 
764 {
765 	struct lpfc_hba *phba = vport->phba;
766 	struct lpfc_node_rrq *rrq;
767 	struct lpfc_node_rrq *nextrrq;
768 	unsigned long iflags;
769 	LIST_HEAD(rrq_list);
770 
771 	if (phba->sli_rev != LPFC_SLI_REV4)
772 		return;
773 	if (!ndlp) {
774 		lpfc_sli4_vport_delete_els_xri_aborted(vport);
775 		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
776 	}
777 	spin_lock_irqsave(&phba->hbalock, iflags);
778 	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
779 		if ((rrq->vport == vport) && (!ndlp  || rrq->ndlp == ndlp))
780 			list_move(&rrq->list, &rrq_list);
781 	spin_unlock_irqrestore(&phba->hbalock, iflags);
782 
783 	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
784 		list_del(&rrq->list);
785 		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
786 	}
787 }
788 
789 /**
790  * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
791  * @phba: Pointer to HBA context object.
792  * @ndlp: Target's nodelist pointer for this exchange.
793  * @xritag: The xri in the bitmap to test.
794  *
795  * This function is called with hbalock held. This function
796  * returns 0 = rrq not active for this xri
797  *         1 = rrq is valid for this xri.
798  **/
799 int
800 lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
801 			uint16_t  xritag)
802 {
803 	lockdep_assert_held(&phba->hbalock);
804 	if (!ndlp)
805 		return 0;
806 	if (!ndlp->active_rrqs_xri_bitmap)
807 		return 0;
808 	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
809 		return 1;
810 	else
811 		return 0;
812 }
813 
814 /**
815  * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
816  * @phba: Pointer to HBA context object.
817  * @ndlp: nodelist pointer for this target.
818  * @xritag: xri used in this exchange.
819  * @rxid: Remote Exchange ID.
820  * @send_rrq: Flag used to determine if we should send rrq els cmd.
821  *
822  * This function takes the hbalock.
823  * The active bit is always set in the active rrq xri_bitmap even
824  * if there is no slot available for the other rrq information.
825  *
826  * returns 0 = rrq activated for this xri
827  *         < 0 = No memory or invalid ndlp.
828  **/
829 int
830 lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
831 		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
832 {
833 	unsigned long iflags;
834 	struct lpfc_node_rrq *rrq;
835 	int empty;
836 
837 	if (!ndlp)
838 		return -EINVAL;
839 
840 	if (!phba->cfg_enable_rrq)
841 		return -EINVAL;
842 
843 	spin_lock_irqsave(&phba->hbalock, iflags);
844 	if (phba->pport->load_flag & FC_UNLOADING) {
845 		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
846 		goto out;
847 	}
848 
849 	/*
850 	 * set the active bit even if there is no mem available.
851 	 */
852 	if (NLP_CHK_FREE_REQ(ndlp))
853 		goto out;
854 
855 	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
856 		goto out;
857 
858 	if (!ndlp->active_rrqs_xri_bitmap)
859 		goto out;
860 
861 	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
862 		goto out;
863 
864 	spin_unlock_irqrestore(&phba->hbalock, iflags);
865 	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
866 	if (!rrq) {
867 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
868 				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
869 				" DID:0x%x Send:%d\n",
870 				xritag, rxid, ndlp->nlp_DID, send_rrq);
871 		return -EINVAL;
872 	}
873 	if (phba->cfg_enable_rrq == 1)
874 		rrq->send_rrq = send_rrq;
875 	else
876 		rrq->send_rrq = 0;
877 	rrq->xritag = xritag;
878 	rrq->rrq_stop_time = jiffies +
879 				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
880 	rrq->ndlp = ndlp;
881 	rrq->nlp_DID = ndlp->nlp_DID;
882 	rrq->vport = ndlp->vport;
883 	rrq->rxid = rxid;
884 	spin_lock_irqsave(&phba->hbalock, iflags);
885 	empty = list_empty(&phba->active_rrq_list);
886 	list_add_tail(&rrq->list, &phba->active_rrq_list);
887 	phba->hba_flag |= HBA_RRQ_ACTIVE;
888 	if (empty)
889 		lpfc_worker_wake_up(phba);
890 	spin_unlock_irqrestore(&phba->hbalock, iflags);
891 	return 0;
892 out:
893 	spin_unlock_irqrestore(&phba->hbalock, iflags);
894 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
895 			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
896 			" DID:0x%x Send:%d\n",
897 			xritag, rxid, ndlp->nlp_DID, send_rrq);
898 	return -EINVAL;
899 }
900 
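/*
 * Illustrative sketch (not part of the driver): once lpfc_set_rrq_active()
 * has marked an xri, other paths (for example the sglq allocator) consult
 * lpfc_test_rrq_active() before reusing that xri toward the same node.
 * lpfc_test_rrq_active() expects the hbalock to be held, so a hypothetical
 * standalone check would look like the helper below.
 */
static inline bool lpfc_xri_has_pending_rrq_example(struct lpfc_hba *phba,
						    struct lpfc_nodelist *ndlp,
						    uint16_t xritag)
{
	unsigned long iflags;
	bool active;

	/* take the hbalock so the bitmap test satisfies the lock contract */
	spin_lock_irqsave(&phba->hbalock, iflags);
	active = lpfc_test_rrq_active(phba, ndlp, xritag);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return active;
}
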
901 /**
902  * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
903  * @phba: Pointer to HBA context object.
904  * @piocb: Pointer to the iocbq.
905  *
906  * This function is called with the ring lock held. This function
907  * gets a new driver sglq object from the sglq list. If the
908  * list is not empty, it returns a pointer to the newly allocated
909  * sglq object, else it returns NULL.
910  **/
911 static struct lpfc_sglq *
912 __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
913 {
914 	struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
915 	struct lpfc_sglq *sglq = NULL;
916 	struct lpfc_sglq *start_sglq = NULL;
917 	struct lpfc_scsi_buf *lpfc_cmd;
918 	struct lpfc_nodelist *ndlp;
919 	int found = 0;
920 
921 	lockdep_assert_held(&phba->hbalock);
922 
923 	if (piocbq->iocb_flag &  LPFC_IO_FCP) {
924 		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
925 		ndlp = lpfc_cmd->rdata->pnode;
926 	} else  if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
927 			!(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
928 		ndlp = piocbq->context_un.ndlp;
929 	} else  if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
930 		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
931 			ndlp = NULL;
932 		else
933 			ndlp = piocbq->context_un.ndlp;
934 	} else {
935 		ndlp = piocbq->context1;
936 	}
937 
938 	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
939 	start_sglq = sglq;
940 	while (!found) {
941 		if (!sglq)
942 			return NULL;
943 		if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_lxritag)) {
944 			/* This xri has an rrq outstanding for this DID.
945 			 * put it back in the list and get another xri.
946 			 */
947 			list_add_tail(&sglq->list, lpfc_sgl_list);
948 			sglq = NULL;
949 			list_remove_head(lpfc_sgl_list, sglq,
950 						struct lpfc_sglq, list);
951 			if (sglq == start_sglq) {
952 				sglq = NULL;
953 				break;
954 			} else
955 				continue;
956 		}
957 		sglq->ndlp = ndlp;
958 		found = 1;
959 		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
960 		sglq->state = SGL_ALLOCATED;
961 	}
962 	return sglq;
963 }
964 
965 /**
966  * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
967  * @phba: Pointer to HBA context object.
968  *
969  * This function is called with no lock held. This function
970  * allocates a new driver iocb object from the iocb pool. If the
971  * allocation is successful, it returns pointer to the newly
972  * allocated iocb object else it returns NULL.
973  **/
974 struct lpfc_iocbq *
975 lpfc_sli_get_iocbq(struct lpfc_hba *phba)
976 {
977 	struct lpfc_iocbq * iocbq = NULL;
978 	unsigned long iflags;
979 
980 	spin_lock_irqsave(&phba->hbalock, iflags);
981 	iocbq = __lpfc_sli_get_iocbq(phba);
982 	spin_unlock_irqrestore(&phba->hbalock, iflags);
983 	return iocbq;
984 }
985 
986 /**
987  * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
988  * @phba: Pointer to HBA context object.
989  * @iocbq: Pointer to driver iocb object.
990  *
991  * This function is called with hbalock held to release driver
992  * iocb object to the iocb pool. The iotag in the iocb object
993  * does not change for each use of the iocb object. This function
994  * clears all other fields of the iocb object when it is freed.
995  * The sglq structure that holds the xritag and phys and virtual
996  * mappings for the scatter gather list is retrieved from the
997  * active array of sglq. The get of the sglq pointer also clears
998  * the entry in the array. If the status of the IO indicates that
999  * this IO was aborted then the sglq entry is put on the
1000  * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
1001  * IO has good status or fails for any other reason then the sglq
1002  * entry is added to the free list (lpfc_sgl_list).
1003  **/
1004 static void
1005 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1006 {
1007 	struct lpfc_sglq *sglq;
1008 	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1009 	unsigned long iflag = 0;
1010 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
1011 
1012 	lockdep_assert_held(&phba->hbalock);
1013 
1014 	if (iocbq->sli4_xritag == NO_XRI)
1015 		sglq = NULL;
1016 	else
1017 		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
1018 
1019 
1020 	if (sglq)  {
1021 		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
1022 			(sglq->state != SGL_XRI_ABORTED)) {
1023 			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
1024 					iflag);
1025 			list_add(&sglq->list,
1026 				&phba->sli4_hba.lpfc_abts_els_sgl_list);
1027 			spin_unlock_irqrestore(
1028 				&phba->sli4_hba.abts_sgl_list_lock, iflag);
1029 		} else {
1030 			spin_lock_irqsave(&pring->ring_lock, iflag);
1031 			sglq->state = SGL_FREED;
1032 			sglq->ndlp = NULL;
1033 			list_add_tail(&sglq->list,
1034 				&phba->sli4_hba.lpfc_sgl_list);
1035 			spin_unlock_irqrestore(&pring->ring_lock, iflag);
1036 
1037 			/* Check if TXQ queue needs to be serviced */
1038 			if (!list_empty(&pring->txq))
1039 				lpfc_worker_wake_up(phba);
1040 		}
1041 	}
1042 
1043 
1044 	/*
1045 	 * Clean all volatile data fields, preserve iotag and node struct.
1046 	 */
1047 	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1048 	iocbq->sli4_lxritag = NO_XRI;
1049 	iocbq->sli4_xritag = NO_XRI;
1050 	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1051 }
1052 
1053 
1054 /**
1055  * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
1056  * @phba: Pointer to HBA context object.
1057  * @iocbq: Pointer to driver iocb object.
1058  *
1059  * This function is called with hbalock held to release driver
1060  * iocb object to the iocb pool. The iotag in the iocb object
1061  * does not change for each use of the iocb object. This function
1062  * clears all other fields of the iocb object when it is freed.
1063  **/
1064 static void
1065 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1066 {
1067 	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1068 
1069 	lockdep_assert_held(&phba->hbalock);
1070 
1071 	/*
1072 	 * Clean all volatile data fields, preserve iotag and node struct.
1073 	 */
1074 	memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1075 	iocbq->sli4_xritag = NO_XRI;
1076 	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1077 }
1078 
1079 /**
1080  * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
1081  * @phba: Pointer to HBA context object.
1082  * @iocbq: Pointer to driver iocb object.
1083  *
1084  * This function is called with hbalock held to release driver
1085  * iocb object to the iocb pool. The iotag in the iocb object
1086  * does not change for each use of the iocb object. This function
1087  * clears all other fields of the iocb object when it is freed.
1088  **/
1089 static void
1090 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1091 {
1092 	lockdep_assert_held(&phba->hbalock);
1093 
1094 	phba->__lpfc_sli_release_iocbq(phba, iocbq);
1095 	phba->iocb_cnt--;
1096 }
1097 
1098 /**
1099  * lpfc_sli_release_iocbq - Release iocb to the iocb pool
1100  * @phba: Pointer to HBA context object.
1101  * @iocbq: Pointer to driver iocb object.
1102  *
1103  * This function is called with no lock held to release the iocb to
1104  * iocb pool.
1105  **/
1106 void
1107 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1108 {
1109 	unsigned long iflags;
1110 
1111 	/*
1112 	 * Clean all volatile data fields, preserve iotag and node struct.
1113 	 */
1114 	spin_lock_irqsave(&phba->hbalock, iflags);
1115 	__lpfc_sli_release_iocbq(phba, iocbq);
1116 	spin_unlock_irqrestore(&phba->hbalock, iflags);
1117 }
1118 
1119 /**
1120  * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1121  * @phba: Pointer to HBA context object.
1122  * @iocblist: List of IOCBs.
1123  * @ulpstatus: ULP status in IOCB command field.
1124  * @ulpWord4: ULP word-4 in IOCB command field.
1125  *
1126  * This function is called with a list of IOCBs to cancel. It cancels each IOCB
1127  * on the list by invoking the completion callback function associated with the
1128  * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
1129  * fields.
1130  **/
1131 void
1132 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1133 		      uint32_t ulpstatus, uint32_t ulpWord4)
1134 {
1135 	struct lpfc_iocbq *piocb;
1136 
1137 	while (!list_empty(iocblist)) {
1138 		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1139 		if (!piocb->iocb_cmpl)
1140 			lpfc_sli_release_iocbq(phba, piocb);
1141 		else {
1142 			piocb->iocb.ulpStatus = ulpstatus;
1143 			piocb->iocb.un.ulpWord[4] = ulpWord4;
1144 			(piocb->iocb_cmpl) (phba, piocb, piocb);
1145 		}
1146 	}
1147 	return;
1148 }
1149 
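/*
 * Illustrative sketch (not part of the driver): a caller typically builds the
 * cancel list by splicing iocbs off a ring's txq under the hbalock and then
 * completes them back with an abort status. The helper below is hypothetical;
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED are the status values this driver
 * commonly uses for such cancellations.
 */
static inline void lpfc_cancel_ring_txq_example(struct lpfc_hba *phba,
						struct lpfc_sli_ring *pring)
{
	LIST_HEAD(cancel_list);
	unsigned long iflags;

	/* move every queued command off the txq while holding the hbalock */
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_splice_init(&pring->txq, &cancel_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* complete each iocb (or free it if it has no completion routine) */
	lpfc_sli_cancel_iocbs(phba, &cancel_list,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}
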
1150 /**
1151  * lpfc_sli_iocb_cmd_type - Get the iocb type
1152  * @iocb_cmnd: iocb command code.
1153  *
1154  * This function is called by ring event handler function to get the iocb type.
1155  * This function translates the iocb command to an iocb command type used to
1156  * decide the final disposition of each completed IOCB.
1157  * The function returns
1158  * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1159  * LPFC_SOL_IOCB     if it is a solicited iocb completion
1160  * LPFC_ABORT_IOCB   if it is an abort iocb
1161  * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
1162  *
1163  * The caller is not required to hold any lock.
1164  **/
1165 static lpfc_iocb_type
1166 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1167 {
1168 	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1169 
1170 	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1171 		return LPFC_UNKNOWN_IOCB;
1172 
1173 	switch (iocb_cmnd) {
1174 	case CMD_XMIT_SEQUENCE_CR:
1175 	case CMD_XMIT_SEQUENCE_CX:
1176 	case CMD_XMIT_BCAST_CN:
1177 	case CMD_XMIT_BCAST_CX:
1178 	case CMD_ELS_REQUEST_CR:
1179 	case CMD_ELS_REQUEST_CX:
1180 	case CMD_CREATE_XRI_CR:
1181 	case CMD_CREATE_XRI_CX:
1182 	case CMD_GET_RPI_CN:
1183 	case CMD_XMIT_ELS_RSP_CX:
1184 	case CMD_GET_RPI_CR:
1185 	case CMD_FCP_IWRITE_CR:
1186 	case CMD_FCP_IWRITE_CX:
1187 	case CMD_FCP_IREAD_CR:
1188 	case CMD_FCP_IREAD_CX:
1189 	case CMD_FCP_ICMND_CR:
1190 	case CMD_FCP_ICMND_CX:
1191 	case CMD_FCP_TSEND_CX:
1192 	case CMD_FCP_TRSP_CX:
1193 	case CMD_FCP_TRECEIVE_CX:
1194 	case CMD_FCP_AUTO_TRSP_CX:
1195 	case CMD_ADAPTER_MSG:
1196 	case CMD_ADAPTER_DUMP:
1197 	case CMD_XMIT_SEQUENCE64_CR:
1198 	case CMD_XMIT_SEQUENCE64_CX:
1199 	case CMD_XMIT_BCAST64_CN:
1200 	case CMD_XMIT_BCAST64_CX:
1201 	case CMD_ELS_REQUEST64_CR:
1202 	case CMD_ELS_REQUEST64_CX:
1203 	case CMD_FCP_IWRITE64_CR:
1204 	case CMD_FCP_IWRITE64_CX:
1205 	case CMD_FCP_IREAD64_CR:
1206 	case CMD_FCP_IREAD64_CX:
1207 	case CMD_FCP_ICMND64_CR:
1208 	case CMD_FCP_ICMND64_CX:
1209 	case CMD_FCP_TSEND64_CX:
1210 	case CMD_FCP_TRSP64_CX:
1211 	case CMD_FCP_TRECEIVE64_CX:
1212 	case CMD_GEN_REQUEST64_CR:
1213 	case CMD_GEN_REQUEST64_CX:
1214 	case CMD_XMIT_ELS_RSP64_CX:
1215 	case DSSCMD_IWRITE64_CR:
1216 	case DSSCMD_IWRITE64_CX:
1217 	case DSSCMD_IREAD64_CR:
1218 	case DSSCMD_IREAD64_CX:
1219 		type = LPFC_SOL_IOCB;
1220 		break;
1221 	case CMD_ABORT_XRI_CN:
1222 	case CMD_ABORT_XRI_CX:
1223 	case CMD_CLOSE_XRI_CN:
1224 	case CMD_CLOSE_XRI_CX:
1225 	case CMD_XRI_ABORTED_CX:
1226 	case CMD_ABORT_MXRI64_CN:
1227 	case CMD_XMIT_BLS_RSP64_CX:
1228 		type = LPFC_ABORT_IOCB;
1229 		break;
1230 	case CMD_RCV_SEQUENCE_CX:
1231 	case CMD_RCV_ELS_REQ_CX:
1232 	case CMD_RCV_SEQUENCE64_CX:
1233 	case CMD_RCV_ELS_REQ64_CX:
1234 	case CMD_ASYNC_STATUS:
1235 	case CMD_IOCB_RCV_SEQ64_CX:
1236 	case CMD_IOCB_RCV_ELS64_CX:
1237 	case CMD_IOCB_RCV_CONT64_CX:
1238 	case CMD_IOCB_RET_XRI64_CX:
1239 		type = LPFC_UNSOL_IOCB;
1240 		break;
1241 	case CMD_IOCB_XMIT_MSEQ64_CR:
1242 	case CMD_IOCB_XMIT_MSEQ64_CX:
1243 	case CMD_IOCB_RCV_SEQ_LIST64_CX:
1244 	case CMD_IOCB_RCV_ELS_LIST64_CX:
1245 	case CMD_IOCB_CLOSE_EXTENDED_CN:
1246 	case CMD_IOCB_ABORT_EXTENDED_CN:
1247 	case CMD_IOCB_RET_HBQE64_CN:
1248 	case CMD_IOCB_FCP_IBIDIR64_CR:
1249 	case CMD_IOCB_FCP_IBIDIR64_CX:
1250 	case CMD_IOCB_FCP_ITASKMGT64_CX:
1251 	case CMD_IOCB_LOGENTRY_CN:
1252 	case CMD_IOCB_LOGENTRY_ASYNC_CN:
1253 		printk("%s - Unhandled SLI-3 Command x%x\n",
1254 				__func__, iocb_cmnd);
1255 		type = LPFC_UNKNOWN_IOCB;
1256 		break;
1257 	default:
1258 		type = LPFC_UNKNOWN_IOCB;
1259 		break;
1260 	}
1261 
1262 	return type;
1263 }
1264 
1265 /**
1266  * lpfc_sli_ring_map - Issue config_ring mbox for all rings
1267  * @phba: Pointer to HBA context object.
1268  *
1269  * This function is called from SLI initialization code
1270  * to configure every ring of the HBA's SLI interface. The
1271  * caller is not required to hold any lock. This function issues
1272  * a config_ring mailbox command for each ring.
1273  * This function returns zero if successful else returns a negative
1274  * error code.
1275  **/
1276 static int
1277 lpfc_sli_ring_map(struct lpfc_hba *phba)
1278 {
1279 	struct lpfc_sli *psli = &phba->sli;
1280 	LPFC_MBOXQ_t *pmb;
1281 	MAILBOX_t *pmbox;
1282 	int i, rc, ret = 0;
1283 
1284 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1285 	if (!pmb)
1286 		return -ENOMEM;
1287 	pmbox = &pmb->u.mb;
1288 	phba->link_state = LPFC_INIT_MBX_CMDS;
1289 	for (i = 0; i < psli->num_rings; i++) {
1290 		lpfc_config_ring(phba, i, pmb);
1291 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1292 		if (rc != MBX_SUCCESS) {
1293 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1294 					"0446 Adapter failed to init (%d), "
1295 					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
1296 					"ring %d\n",
1297 					rc, pmbox->mbxCommand,
1298 					pmbox->mbxStatus, i);
1299 			phba->link_state = LPFC_HBA_ERROR;
1300 			ret = -ENXIO;
1301 			break;
1302 		}
1303 	}
1304 	mempool_free(pmb, phba->mbox_mem_pool);
1305 	return ret;
1306 }
1307 
1308 /**
1309  * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
1310  * @phba: Pointer to HBA context object.
1311  * @pring: Pointer to driver SLI ring object.
1312  * @piocb: Pointer to the driver iocb object.
1313  *
1314  * This function is called with hbalock held. The function adds the
1315  * new iocb to txcmplq of the given ring. This function always returns
1316  * 0. If this function is called for ELS ring, this function checks if
1317  * there is a vport associated with the ELS command. This function also
1318  * starts els_tmofunc timer if this is an ELS command.
1319  **/
1320 static int
1321 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1322 			struct lpfc_iocbq *piocb)
1323 {
1324 	lockdep_assert_held(&phba->hbalock);
1325 
1326 	BUG_ON(!piocb || !piocb->vport);
1327 
1328 	list_add_tail(&piocb->list, &pring->txcmplq);
1329 	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
1330 
1331 	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1332 	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
1333 	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN) &&
1334 	    (!(piocb->vport->load_flag & FC_UNLOADING)))
1335 		mod_timer(&piocb->vport->els_tmofunc,
1336 			  jiffies +
1337 			  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1338 
1339 	return 0;
1340 }
1341 
1342 /**
1343  * lpfc_sli_ringtx_get - Get first element of the txq
1344  * @phba: Pointer to HBA context object.
1345  * @pring: Pointer to driver SLI ring object.
1346  *
1347  * This function is called with hbalock held to get next
1348  * iocb in txq of the given ring. If there is any iocb in
1349  * the txq, the function returns first iocb in the list after
1350  * removing the iocb from the list, else it returns NULL.
1351  **/
1352 struct lpfc_iocbq *
1353 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1354 {
1355 	struct lpfc_iocbq *cmd_iocb;
1356 
1357 	lockdep_assert_held(&phba->hbalock);
1358 
1359 	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1360 	return cmd_iocb;
1361 }
1362 
1363 /**
1364  * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
1365  * @phba: Pointer to HBA context object.
1366  * @pring: Pointer to driver SLI ring object.
1367  *
1368  * This function is called with hbalock held and the caller must post the
1369  * iocb without releasing the lock. If the caller releases the lock,
1370  * iocb slot returned by the function is not guaranteed to be available.
1371  * The function returns pointer to the next available iocb slot if there
1372  * is available slot in the ring, else it returns NULL.
1373  * If the get index of the ring is ahead of the put index, the function
1374  * will post an error attention event to the worker thread to take the
1375  * HBA to offline state.
1376  **/
1377 static IOCB_t *
1378 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1379 {
1380 	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
1381 	uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;
1382 
1383 	lockdep_assert_held(&phba->hbalock);
1384 
1385 	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
1386 	   (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
1387 		pring->sli.sli3.next_cmdidx = 0;
1388 
1389 	if (unlikely(pring->sli.sli3.local_getidx ==
1390 		pring->sli.sli3.next_cmdidx)) {
1391 
1392 		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
1393 
1394 		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
1395 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1396 					"0315 Ring %d issue: portCmdGet %d "
1397 					"is bigger than cmd ring %d\n",
1398 					pring->ringno,
1399 					pring->sli.sli3.local_getidx,
1400 					max_cmd_idx);
1401 
1402 			phba->link_state = LPFC_HBA_ERROR;
1403 			/*
1404 			 * All error attention handlers are posted to
1405 			 * worker thread
1406 			 */
1407 			phba->work_ha |= HA_ERATT;
1408 			phba->work_hs = HS_FFER3;
1409 
1410 			lpfc_worker_wake_up(phba);
1411 
1412 			return NULL;
1413 		}
1414 
1415 		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
1416 			return NULL;
1417 	}
1418 
1419 	return lpfc_cmd_iocb(phba, pring);
1420 }
1421 
1422 /**
1423  * lpfc_sli_next_iotag - Get an iotag for the iocb
1424  * @phba: Pointer to HBA context object.
1425  * @iocbq: Pointer to driver iocb object.
1426  *
1427  * This function gets an iotag for the iocb. If there is no unused iotag and
1428  * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
1429  * array and assigns a new iotag.
1430  * The function returns the allocated iotag if successful, else returns zero.
1431  * Zero is not a valid iotag.
1432  * The caller is not required to hold any lock.
1433  **/
1434 uint16_t
1435 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1436 {
1437 	struct lpfc_iocbq **new_arr;
1438 	struct lpfc_iocbq **old_arr;
1439 	size_t new_len;
1440 	struct lpfc_sli *psli = &phba->sli;
1441 	uint16_t iotag;
1442 
1443 	spin_lock_irq(&phba->hbalock);
1444 	iotag = psli->last_iotag;
1445 	if(++iotag < psli->iocbq_lookup_len) {
1446 		psli->last_iotag = iotag;
1447 		psli->iocbq_lookup[iotag] = iocbq;
1448 		spin_unlock_irq(&phba->hbalock);
1449 		iocbq->iotag = iotag;
1450 		return iotag;
1451 	} else if (psli->iocbq_lookup_len < (0xffff
1452 					   - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1453 		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
1454 		spin_unlock_irq(&phba->hbalock);
1455 		new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
1456 				  GFP_KERNEL);
1457 		if (new_arr) {
1458 			spin_lock_irq(&phba->hbalock);
1459 			old_arr = psli->iocbq_lookup;
1460 			if (new_len <= psli->iocbq_lookup_len) {
1461 				/* highly improbable case */
1462 				kfree(new_arr);
1463 				iotag = psli->last_iotag;
1464 				if(++iotag < psli->iocbq_lookup_len) {
1465 					psli->last_iotag = iotag;
1466 					psli->iocbq_lookup[iotag] = iocbq;
1467 					spin_unlock_irq(&phba->hbalock);
1468 					iocbq->iotag = iotag;
1469 					return iotag;
1470 				}
1471 				spin_unlock_irq(&phba->hbalock);
1472 				return 0;
1473 			}
1474 			if (psli->iocbq_lookup)
1475 				memcpy(new_arr, old_arr,
1476 				       ((psli->last_iotag  + 1) *
1477 					sizeof (struct lpfc_iocbq *)));
1478 			psli->iocbq_lookup = new_arr;
1479 			psli->iocbq_lookup_len = new_len;
1480 			psli->last_iotag = iotag;
1481 			psli->iocbq_lookup[iotag] = iocbq;
1482 			spin_unlock_irq(&phba->hbalock);
1483 			iocbq->iotag = iotag;
1484 			kfree(old_arr);
1485 			return iotag;
1486 		}
1487 	} else
1488 		spin_unlock_irq(&phba->hbalock);
1489 
1490 	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1491 			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1492 			psli->last_iotag);
1493 
1494 	return 0;
1495 }
1496 
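/*
 * Illustrative sketch (not part of the driver): the iotag handed out above is
 * what the completion path later uses to map a response back to its driver
 * iocb through the psli->iocbq_lookup array. The helper below is hypothetical
 * and mirrors that reverse lookup.
 */
static inline struct lpfc_iocbq *
lpfc_iotag_to_iocbq_example(struct lpfc_hba *phba, uint16_t iotag)
{
	struct lpfc_sli *psli = &phba->sli;

	/* iotag 0 is never handed out; valid tags never exceed last_iotag */
	if (iotag != 0 && iotag <= psli->last_iotag)
		return psli->iocbq_lookup[iotag];
	return NULL;
}
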
1497 /**
1498  * lpfc_sli_submit_iocb - Submit an iocb to the firmware
1499  * @phba: Pointer to HBA context object.
1500  * @pring: Pointer to driver SLI ring object.
1501  * @iocb: Pointer to iocb slot in the ring.
1502  * @nextiocb: Pointer to driver iocb object which needs to be
1503  *            posted to firmware.
1504  *
1505  * This function is called with hbalock held to post a new iocb to
1506  * the firmware. This function copies the new iocb to ring iocb slot and
1507  * updates the ring pointers. It adds the new iocb to txcmplq if there is
1508  * a completion call back for this iocb else the function will free the
1509  * iocb object.
1510  **/
1511 static void
1512 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1513 		IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1514 {
1515 	lockdep_assert_held(&phba->hbalock);
1516 	/*
1517 	 * Set up an iotag
1518 	 */
1519 	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
1520 
1521 
1522 	if (pring->ringno == LPFC_ELS_RING) {
1523 		lpfc_debugfs_slow_ring_trc(phba,
1524 			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
1525 			*(((uint32_t *) &nextiocb->iocb) + 4),
1526 			*(((uint32_t *) &nextiocb->iocb) + 6),
1527 			*(((uint32_t *) &nextiocb->iocb) + 7));
1528 	}
1529 
1530 	/*
1531 	 * Issue iocb command to adapter
1532 	 */
1533 	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
1534 	wmb();
1535 	pring->stats.iocb_cmd++;
1536 
1537 	/*
1538 	 * If there is no completion routine to call, we can release the
1539 	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1540 	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1541 	 */
1542 	if (nextiocb->iocb_cmpl)
1543 		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
1544 	else
1545 		__lpfc_sli_release_iocbq(phba, nextiocb);
1546 
1547 	/*
1548 	 * Let the HBA know what IOCB slot will be the next one the
1549 	 * driver will put a command into.
1550 	 */
1551 	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1552 	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
1553 }
1554 
1555 /**
1556  * lpfc_sli_update_full_ring - Update the chip attention register
1557  * @phba: Pointer to HBA context object.
1558  * @pring: Pointer to driver SLI ring object.
1559  *
1560  * The caller is not required to hold any lock for calling this function.
1561  * This function updates the chip attention bits for the ring to inform firmware
1562  * that there is pending work to be done for this ring and requests an
1563  * interrupt when there is space available in the ring. This function is
1564  * called when the driver is unable to post more iocbs to the ring due
1565  * to unavailability of space in the ring.
1566  **/
1567 static void
1568 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1569 {
1570 	int ringno = pring->ringno;
1571 
1572 	pring->flag |= LPFC_CALL_RING_AVAILABLE;
1573 
1574 	wmb();
1575 
1576 	/*
1577 	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1578 	 * The HBA will tell us when an IOCB entry is available.
1579 	 */
1580 	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1581 	readl(phba->CAregaddr); /* flush */
1582 
1583 	pring->stats.iocb_cmd_full++;
1584 }
1585 
1586 /**
1587  * lpfc_sli_update_ring - Update chip attention register
1588  * @phba: Pointer to HBA context object.
1589  * @pring: Pointer to driver SLI ring object.
1590  *
1591  * This function updates the chip attention register bit for the
1592  * given ring to inform HBA that there is more work to be done
1593  * in this ring. The caller is not required to hold any lock.
1594  **/
1595 static void
1596 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1597 {
1598 	int ringno = pring->ringno;
1599 
1600 	/*
1601 	 * Tell the HBA that there is work to do in this ring.
1602 	 */
1603 	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1604 		wmb();
1605 		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1606 		readl(phba->CAregaddr); /* flush */
1607 	}
1608 }
1609 
1610 /**
1611  * lpfc_sli_resume_iocb - Process iocbs in the txq
1612  * @phba: Pointer to HBA context object.
1613  * @pring: Pointer to driver SLI ring object.
1614  *
1615  * This function is called with hbalock held to post pending iocbs
1616  * in the txq to the firmware. This function is called when the driver
1617  * detects space available in the ring.
1618  **/
1619 static void
1620 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1621 {
1622 	IOCB_t *iocb;
1623 	struct lpfc_iocbq *nextiocb;
1624 
1625 	lockdep_assert_held(&phba->hbalock);
1626 
1627 	/*
1628 	 * Check to see if:
1629 	 *  (a) there is anything on the txq to send
1630 	 *  (b) link is up
1631 	 *  (c) link attention events can be processed (fcp ring only)
1632 	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
1633 	 */
1634 
1635 	if (lpfc_is_link_up(phba) &&
1636 	    (!list_empty(&pring->txq)) &&
1637 	    (pring->ringno != phba->sli.fcp_ring ||
1638 	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {
1639 
1640 		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1641 		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1642 			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1643 
1644 		if (iocb)
1645 			lpfc_sli_update_ring(phba, pring);
1646 		else
1647 			lpfc_sli_update_full_ring(phba, pring);
1648 	}
1649 
1650 	return;
1651 }
1652 
1653 /**
1654  * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
1655  * @phba: Pointer to HBA context object.
1656  * @hbqno: HBQ number.
1657  *
1658  * This function is called with hbalock held to get the next
1659  * available slot for the given HBQ. If there is free slot
1660  * available for the HBQ it will return pointer to the next available
1661  * HBQ entry else it will return NULL.
1662  **/
1663 static struct lpfc_hbq_entry *
1664 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1665 {
1666 	struct hbq_s *hbqp = &phba->hbqs[hbqno];
1667 
1668 	lockdep_assert_held(&phba->hbalock);
1669 
1670 	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1671 	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1672 		hbqp->next_hbqPutIdx = 0;
1673 
1674 	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
1675 		uint32_t raw_index = phba->hbq_get[hbqno];
1676 		uint32_t getidx = le32_to_cpu(raw_index);
1677 
1678 		hbqp->local_hbqGetIdx = getidx;
1679 
1680 		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1681 			lpfc_printf_log(phba, KERN_ERR,
1682 					LOG_SLI | LOG_VPORT,
1683 					"1802 HBQ %d: local_hbqGetIdx "
1684 					"%u is > than hbqp->entry_count %u\n",
1685 					hbqno, hbqp->local_hbqGetIdx,
1686 					hbqp->entry_count);
1687 
1688 			phba->link_state = LPFC_HBA_ERROR;
1689 			return NULL;
1690 		}
1691 
1692 		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1693 			return NULL;
1694 	}
1695 
1696 	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1697 			hbqp->hbqPutIdx;
1698 }
1699 
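/*
 * Illustration only (not part of the driver): the HBQ put/get indices above
 * implement an ordinary circular buffer.  A minimal sketch of the same
 * "is there a free slot" check for a generic ring of entry_count slots,
 * using hypothetical put/get/ring variables:
 *
 *	uint32_t next = put + 1;
 *
 *	if (next >= entry_count)
 *		next = 0;
 *	if (next == get)
 *		return NULL;
 *	return &ring[put];
 */
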
1700 /**
1701  * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
1702  * @phba: Pointer to HBA context object.
1703  *
1704  * This function is called with no lock held to free all the
1705  * hbq buffers while uninitializing the SLI interface. It also
1706  * frees the HBQ buffers returned by the firmware but not yet
1707  * processed by the upper layers.
1708  **/
1709 void
1710 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1711 {
1712 	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1713 	struct hbq_dmabuf *hbq_buf;
1714 	unsigned long flags;
1715 	int i, hbq_count;
1716 	uint32_t hbqno;
1717 
1718 	hbq_count = lpfc_sli_hbq_count();
1719 	/* Return all memory used by all HBQs */
1720 	spin_lock_irqsave(&phba->hbalock, flags);
1721 	for (i = 0; i < hbq_count; ++i) {
1722 		list_for_each_entry_safe(dmabuf, next_dmabuf,
1723 				&phba->hbqs[i].hbq_buffer_list, list) {
1724 			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1725 			list_del(&hbq_buf->dbuf.list);
1726 			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1727 		}
1728 		phba->hbqs[i].buffer_count = 0;
1729 	}
1730 	/* Return all HBQ buffers that are in flight */
1731 	list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
1732 				 list) {
1733 		hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1734 		list_del(&hbq_buf->dbuf.list);
1735 		if (hbq_buf->tag == -1) {
1736 			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1737 				(phba, hbq_buf);
1738 		} else {
1739 			hbqno = hbq_buf->tag >> 16;
1740 			if (hbqno >= LPFC_MAX_HBQS)
1741 				(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1742 					(phba, hbq_buf);
1743 			else
1744 				(phba->hbqs[hbqno].hbq_free_buffer)(phba,
1745 					hbq_buf);
1746 		}
1747 	}
1748 
1749 	/* Mark the HBQs not in use */
1750 	phba->hbq_in_use = 0;
1751 	spin_unlock_irqrestore(&phba->hbalock, flags);
1752 }
1753 
1754 /**
1755  * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
1756  * @phba: Pointer to HBA context object.
1757  * @hbqno: HBQ number.
1758  * @hbq_buf: Pointer to HBQ buffer.
1759  *
1760  * This function is called with the hbalock held to post an
1761  * hbq buffer to the firmware. If the function finds an empty
1762  * slot in the HBQ, it will post the buffer. The function will return
1763  * zero if it successfully posts the buffer; otherwise it will return
1764  * an error.
1765  **/
1766 static int
1767 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
1768 			 struct hbq_dmabuf *hbq_buf)
1769 {
1770 	lockdep_assert_held(&phba->hbalock);
1771 	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1772 }
1773 
1774 /**
1775  * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1776  * @phba: Pointer to HBA context object.
1777  * @hbqno: HBQ number.
1778  * @hbq_buf: Pointer to HBQ buffer.
1779  *
1780  * This function is called with the hbalock held to post an hbq buffer to the
1781  * firmware. If the function finds an empty slot in the HBQ, it will post the
1782  * buffer and place it on the hbq_buffer_list. The function will return zero if
1783  * it successfully posts the buffer; otherwise it will return an error.
1784  **/
1785 static int
1786 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
1787 			    struct hbq_dmabuf *hbq_buf)
1788 {
1789 	struct lpfc_hbq_entry *hbqe;
1790 	dma_addr_t physaddr = hbq_buf->dbuf.phys;
1791 
1792 	lockdep_assert_held(&phba->hbalock);
1793 	/* Get next HBQ entry slot to use */
1794 	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
1795 	if (hbqe) {
1796 		struct hbq_s *hbqp = &phba->hbqs[hbqno];
1797 
1798 		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1799 		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
1800 		hbqe->bde.tus.f.bdeSize = hbq_buf->size;
1801 		hbqe->bde.tus.f.bdeFlags = 0;
1802 		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
1803 		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
1804 				/* Sync SLIM */
1805 		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
1806 		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
1807 				/* flush */
1808 		readl(phba->hbq_put + hbqno);
1809 		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
1810 		return 0;
1811 	} else
1812 		return -ENOMEM;
1813 }
1814 
1815 /**
1816  * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
1817  * @phba: Pointer to HBA context object.
1818  * @hbqno: HBQ number.
1819  * @hbq_buf: Pointer to HBQ buffer.
1820  *
1821  * This function is called with the hbalock held to post an RQE to the SLI4
1822  * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
1823  * the hbq_buffer_list and return zero, otherwise it will return an error.
1824  **/
1825 static int
1826 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1827 			    struct hbq_dmabuf *hbq_buf)
1828 {
1829 	int rc;
1830 	struct lpfc_rqe hrqe;
1831 	struct lpfc_rqe drqe;
1832 
1833 	lockdep_assert_held(&phba->hbalock);
1834 	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
1835 	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
1836 	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
1837 	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
1838 	rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
1839 			      &hrqe, &drqe);
1840 	if (rc < 0)
1841 		return rc;
1842 	hbq_buf->tag = rc;
1843 	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
1844 	return 0;
1845 }
1846 
1847 /* HBQ for ELS and CT traffic. */
1848 static struct lpfc_hbq_init lpfc_els_hbq = {
1849 	.rn = 1,
1850 	.entry_count = 256,
1851 	.mask_count = 0,
1852 	.profile = 0,
1853 	.ring_mask = (1 << LPFC_ELS_RING),
1854 	.buffer_count = 0,
1855 	.init_count = 40,
1856 	.add_count = 40,
1857 };
1858 
1859 /* HBQ for the extra ring if needed */
1860 static struct lpfc_hbq_init lpfc_extra_hbq = {
1861 	.rn = 1,
1862 	.entry_count = 200,
1863 	.mask_count = 0,
1864 	.profile = 0,
1865 	.ring_mask = (1 << LPFC_EXTRA_RING),
1866 	.buffer_count = 0,
1867 	.init_count = 0,
1868 	.add_count = 5,
1869 };
1870 
1871 /* Array of HBQs */
1872 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
1873 	&lpfc_els_hbq,
1874 	&lpfc_extra_hbq,
1875 };
1876 
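/*
 * Illustration only (not part of the driver): lpfc_hbq_defs[] is indexed by
 * HBQ number, and its init_count/add_count fields drive how many buffers the
 * initialization and replenish paths below request:
 *
 *	lpfc_sli_hbqbuf_fill_hbqs(phba, qno, lpfc_hbq_defs[qno]->init_count);
 *	lpfc_sli_hbqbuf_fill_hbqs(phba, qno, lpfc_hbq_defs[qno]->add_count);
 */
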
1877 /**
1878  * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
1879  * @phba: Pointer to HBA context object.
1880  * @hbqno: HBQ number.
1881  * @count: Number of HBQ buffers to be posted.
1882  *
1883  * This function is called with no lock held to post more hbq buffers to the
1884  * given HBQ. The function returns the number of HBQ buffers successfully
1885  * posted.
1886  **/
1887 static int
1888 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
1889 {
1890 	uint32_t i, posted = 0;
1891 	unsigned long flags;
1892 	struct hbq_dmabuf *hbq_buffer;
1893 	LIST_HEAD(hbq_buf_list);
1894 	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
1895 		return 0;
1896 
1897 	if ((phba->hbqs[hbqno].buffer_count + count) >
1898 	    lpfc_hbq_defs[hbqno]->entry_count)
1899 		count = lpfc_hbq_defs[hbqno]->entry_count -
1900 					phba->hbqs[hbqno].buffer_count;
1901 	if (!count)
1902 		return 0;
1903 	/* Allocate HBQ entries */
1904 	for (i = 0; i < count; i++) {
1905 		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
1906 		if (!hbq_buffer)
1907 			break;
1908 		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
1909 	}
1910 	/* Check whether HBQ is still in use */
1911 	spin_lock_irqsave(&phba->hbalock, flags);
1912 	if (!phba->hbq_in_use)
1913 		goto err;
1914 	while (!list_empty(&hbq_buf_list)) {
1915 		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1916 				 dbuf.list);
1917 		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
1918 				      (hbqno << 16));
1919 		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
1920 			phba->hbqs[hbqno].buffer_count++;
1921 			posted++;
1922 		} else
1923 			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1924 	}
1925 	spin_unlock_irqrestore(&phba->hbalock, flags);
1926 	return posted;
1927 err:
1928 	spin_unlock_irqrestore(&phba->hbalock, flags);
1929 	while (!list_empty(&hbq_buf_list)) {
1930 		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1931 				 dbuf.list);
1932 		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1933 	}
1934 	return 0;
1935 }
1936 
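/*
 * Illustration only (not part of the driver): the buffer tag assigned in
 * lpfc_sli_hbqbuf_fill_hbqs() packs the HBQ number into the upper 16 bits and
 * the per-HBQ buffer count into the lower 16 bits, which is why consumers such
 * as lpfc_sli_hbqbuf_find() and lpfc_sli_hbqbuf_free_all() recover the HBQ
 * number with "tag >> 16":
 *
 *	uint32_t tag   = (hbqno << 16) | buffer_count;
 *	uint32_t hbq   = tag >> 16;
 *	uint32_t count = tag & 0xffff;
 */
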
1937 /**
1938  * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
1939  * @phba: Pointer to HBA context object.
1940  * @qno: HBQ number.
1941  *
1942  * This function posts more buffers to the HBQ. This function
1943  * is called with no lock held. The function returns the number of HBQ entries
1944  * successfully allocated.
1945  **/
1946 int
1947 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
1948 {
1949 	if (phba->sli_rev == LPFC_SLI_REV4)
1950 		return 0;
1951 	else
1952 		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1953 					 lpfc_hbq_defs[qno]->add_count);
1954 }
1955 
1956 /**
1957  * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
1958  * @phba: Pointer to HBA context object.
1959  * @qno:  HBQ queue number.
1960  *
1961  * This function is called from SLI initialization code path with
1962  * no lock held to post initial HBQ buffers to firmware. The
1963  * function returns the number of HBQ entries successfully allocated.
1964  **/
1965 static int
1966 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
1967 {
1968 	if (phba->sli_rev == LPFC_SLI_REV4)
1969 		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1970 					lpfc_hbq_defs[qno]->entry_count);
1971 	else
1972 		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1973 					 lpfc_hbq_defs[qno]->init_count);
1974 }
1975 
1976 /**
1977  * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
1978  * @rb_list: Pointer to the driver hbq buffer list from which to
1979  *           remove the first buffer.
1980  *
1981  * This function removes the first hbq buffer on an hbq list and returns a
1982  * pointer to that buffer. If it finds no buffers on the list it returns NULL.
1983  **/
1984 static struct hbq_dmabuf *
1985 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
1986 {
1987 	struct lpfc_dmabuf *d_buf;
1988 
1989 	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
1990 	if (!d_buf)
1991 		return NULL;
1992 	return container_of(d_buf, struct hbq_dmabuf, dbuf);
1993 }
1994 
1995 /**
1996  * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
1997  * @phba: Pointer to HBA context object.
1998  * @tag: Tag of the hbq buffer.
1999  *
2000  * This function searches for the hbq buffer associated with the given tag in
2001  * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2002  * otherwise it returns NULL.
2003  **/
2004 static struct hbq_dmabuf *
2005 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2006 {
2007 	struct lpfc_dmabuf *d_buf;
2008 	struct hbq_dmabuf *hbq_buf;
2009 	uint32_t hbqno;
2010 
2011 	hbqno = tag >> 16;
2012 	if (hbqno >= LPFC_MAX_HBQS)
2013 		return NULL;
2014 
2015 	spin_lock_irq(&phba->hbalock);
2016 	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2017 		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2018 		if (hbq_buf->tag == tag) {
2019 			spin_unlock_irq(&phba->hbalock);
2020 			return hbq_buf;
2021 		}
2022 	}
2023 	spin_unlock_irq(&phba->hbalock);
2024 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
2025 			"1803 Bad hbq tag. Data: x%x x%x\n",
2026 			tag, phba->hbqs[tag >> 16].buffer_count);
2027 	return NULL;
2028 }
2029 
2030 /**
2031  * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2032  * @phba: Pointer to HBA context object.
2033  * @hbq_buffer: Pointer to HBQ buffer.
2034  *
2035  * This function is called with the hbalock held. This function gives back
2036  * the hbq buffer to the firmware. If the HBQ does not have space to
2037  * post the buffer, it will free the buffer.
2038  **/
2039 void
2040 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2041 {
2042 	uint32_t hbqno;
2043 
2044 	if (hbq_buffer) {
2045 		hbqno = hbq_buffer->tag >> 16;
2046 		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2047 			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2048 	}
2049 }
2050 
2051 /**
2052  * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2053  * @mbxCommand: mailbox command code.
2054  *
2055  * This function is called by the mailbox event handler function to verify
2056  * that the completed mailbox command is a legitimate mailbox command. If the
2057  * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2058  * and the mailbox event handler will take the HBA offline.
2059  **/
2060 static int
2061 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2062 {
2063 	uint8_t ret;
2064 
2065 	switch (mbxCommand) {
2066 	case MBX_LOAD_SM:
2067 	case MBX_READ_NV:
2068 	case MBX_WRITE_NV:
2069 	case MBX_WRITE_VPARMS:
2070 	case MBX_RUN_BIU_DIAG:
2071 	case MBX_INIT_LINK:
2072 	case MBX_DOWN_LINK:
2073 	case MBX_CONFIG_LINK:
2074 	case MBX_CONFIG_RING:
2075 	case MBX_RESET_RING:
2076 	case MBX_READ_CONFIG:
2077 	case MBX_READ_RCONFIG:
2078 	case MBX_READ_SPARM:
2079 	case MBX_READ_STATUS:
2080 	case MBX_READ_RPI:
2081 	case MBX_READ_XRI:
2082 	case MBX_READ_REV:
2083 	case MBX_READ_LNK_STAT:
2084 	case MBX_REG_LOGIN:
2085 	case MBX_UNREG_LOGIN:
2086 	case MBX_CLEAR_LA:
2087 	case MBX_DUMP_MEMORY:
2088 	case MBX_DUMP_CONTEXT:
2089 	case MBX_RUN_DIAGS:
2090 	case MBX_RESTART:
2091 	case MBX_UPDATE_CFG:
2092 	case MBX_DOWN_LOAD:
2093 	case MBX_DEL_LD_ENTRY:
2094 	case MBX_RUN_PROGRAM:
2095 	case MBX_SET_MASK:
2096 	case MBX_SET_VARIABLE:
2097 	case MBX_UNREG_D_ID:
2098 	case MBX_KILL_BOARD:
2099 	case MBX_CONFIG_FARP:
2100 	case MBX_BEACON:
2101 	case MBX_LOAD_AREA:
2102 	case MBX_RUN_BIU_DIAG64:
2103 	case MBX_CONFIG_PORT:
2104 	case MBX_READ_SPARM64:
2105 	case MBX_READ_RPI64:
2106 	case MBX_REG_LOGIN64:
2107 	case MBX_READ_TOPOLOGY:
2108 	case MBX_WRITE_WWN:
2109 	case MBX_SET_DEBUG:
2110 	case MBX_LOAD_EXP_ROM:
2111 	case MBX_ASYNCEVT_ENABLE:
2112 	case MBX_REG_VPI:
2113 	case MBX_UNREG_VPI:
2114 	case MBX_HEARTBEAT:
2115 	case MBX_PORT_CAPABILITIES:
2116 	case MBX_PORT_IOV_CONTROL:
2117 	case MBX_SLI4_CONFIG:
2118 	case MBX_SLI4_REQ_FTRS:
2119 	case MBX_REG_FCFI:
2120 	case MBX_UNREG_FCFI:
2121 	case MBX_REG_VFI:
2122 	case MBX_UNREG_VFI:
2123 	case MBX_INIT_VPI:
2124 	case MBX_INIT_VFI:
2125 	case MBX_RESUME_RPI:
2126 	case MBX_READ_EVENT_LOG_STATUS:
2127 	case MBX_READ_EVENT_LOG:
2128 	case MBX_SECURITY_MGMT:
2129 	case MBX_AUTH_PORT:
2130 	case MBX_ACCESS_VDATA:
2131 		ret = mbxCommand;
2132 		break;
2133 	default:
2134 		ret = MBX_SHUTDOWN;
2135 		break;
2136 	}
2137 	return ret;
2138 }
2139 
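/*
 * Illustration only (not part of the driver): callers treat an MBX_SHUTDOWN
 * return as "unknown command", exactly as lpfc_sli_handle_mb_event() below
 * does before taking the HBA offline:
 *
 *	if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == MBX_SHUTDOWN) {
 *		phba->link_state = LPFC_HBA_ERROR;
 *		phba->work_hs = HS_FFER3;
 *		lpfc_handle_eratt(phba);
 *	}
 */
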
2140 /**
2141  * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2142  * @phba: Pointer to HBA context object.
2143  * @pmboxq: Pointer to mailbox command.
2144  *
2145  * This is the completion handler function for mailbox commands issued from
2146  * the lpfc_sli_issue_mbox_wait function. This function is called by the
2147  * mailbox event handler function with no lock held. This function
2148  * will wake up the thread waiting on the wait queue pointed to by context1
2149  * of the mailbox.
2150  **/
2151 void
2152 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2153 {
2154 	wait_queue_head_t *pdone_q;
2155 	unsigned long drvr_flag;
2156 
2157 	/*
2158 	 * If pdone_q is NULL, the driver thread gave up waiting and
2159 	 * continued running.
2160 	 */
2161 	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2162 	spin_lock_irqsave(&phba->hbalock, drvr_flag);
2163 	pdone_q = (wait_queue_head_t *) pmboxq->context1;
2164 	if (pdone_q)
2165 		wake_up_interruptible(pdone_q);
2166 	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2167 	return;
2168 }
2169 
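/*
 * Hedged sketch (not part of the driver) of the waiting side that pairs with
 * lpfc_sli_wake_mbox_wait(); the real code lives in lpfc_sli_issue_mbox_wait().
 * The waiter parks a wait queue head in context1 and sleeps until this handler
 * sets LPFC_MBX_WAKE (retval and timeout_jiffies are placeholders):
 *
 *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
 *
 *	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
 *	pmboxq->context1 = &done_q;
 *	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *	if (retval == MBX_BUSY || retval == MBX_SUCCESS)
 *		wait_event_interruptible_timeout(done_q,
 *				pmboxq->mbox_flag & LPFC_MBX_WAKE,
 *				timeout_jiffies);
 */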
2170 
2171 /**
2172  * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2173  * @phba: Pointer to HBA context object.
2174  * @pmb: Pointer to mailbox object.
2175  *
2176  * This function is the default mailbox completion handler. It
2177  * frees the memory resources associated with the completed mailbox
2178  * command. If the completed command is a REG_LOGIN mailbox command,
2179  * this function will issue an UNREG_LOGIN to reclaim the RPI.
2180  **/
2181 void
2182 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2183 {
2184 	struct lpfc_vport  *vport = pmb->vport;
2185 	struct lpfc_dmabuf *mp;
2186 	struct lpfc_nodelist *ndlp;
2187 	struct Scsi_Host *shost;
2188 	uint16_t rpi, vpi;
2189 	int rc;
2190 
2191 	mp = (struct lpfc_dmabuf *) (pmb->context1);
2192 
2193 	if (mp) {
2194 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
2195 		kfree(mp);
2196 	}
2197 
2198 	/*
2199 	 * If a REG_LOGIN succeeded after the node was destroyed or the node
2200 	 * is in re-discovery, the driver needs to clean up the RPI.
2201 	 */
2202 	if (!(phba->pport->load_flag & FC_UNLOADING) &&
2203 	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2204 	    !pmb->u.mb.mbxStatus) {
2205 		rpi = pmb->u.mb.un.varWords[0];
2206 		vpi = pmb->u.mb.un.varRegLogin.vpi;
2207 		lpfc_unreg_login(phba, vpi, rpi, pmb);
2208 		pmb->vport = vport;
2209 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2210 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2211 		if (rc != MBX_NOT_FINISHED)
2212 			return;
2213 	}
2214 
2215 	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2216 		!(phba->pport->load_flag & FC_UNLOADING) &&
2217 		!pmb->u.mb.mbxStatus) {
2218 		shost = lpfc_shost_from_vport(vport);
2219 		spin_lock_irq(shost->host_lock);
2220 		vport->vpi_state |= LPFC_VPI_REGISTERED;
2221 		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2222 		spin_unlock_irq(shost->host_lock);
2223 	}
2224 
2225 	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2226 		ndlp = (struct lpfc_nodelist *)pmb->context2;
2227 		lpfc_nlp_put(ndlp);
2228 		pmb->context2 = NULL;
2229 	}
2230 
2231 	/* Check security permission status on INIT_LINK mailbox command */
2232 	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2233 	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2234 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2235 				"2860 SLI authentication is required "
2236 				"for INIT_LINK but has not done yet\n");
2237 
2238 	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2239 		lpfc_sli4_mbox_cmd_free(phba, pmb);
2240 	else
2241 		mempool_free(pmb, phba->mbox_mem_pool);
2242 }
2243  /**
2244  * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2245  * @phba: Pointer to HBA context object.
2246  * @pmb: Pointer to mailbox object.
2247  *
2248  * This function is the unreg rpi mailbox completion handler. It
2249  * frees the memory resources associated with the completed mailbox
2250  * command. An additional reference is put on the ndlp to prevent
2251  * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2252  * the unreg mailbox command completes; this routine puts that
2253  * reference back.
2254  *
2255  **/
2256 void
2257 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2258 {
2259 	struct lpfc_vport  *vport = pmb->vport;
2260 	struct lpfc_nodelist *ndlp;
2261 
2262 	ndlp = pmb->context1;
2263 	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2264 		if (phba->sli_rev == LPFC_SLI_REV4 &&
2265 		    (bf_get(lpfc_sli_intf_if_type,
2266 		     &phba->sli4_hba.sli_intf) ==
2267 		     LPFC_SLI_INTF_IF_TYPE_2)) {
2268 			if (ndlp) {
2269 				lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
2270 						 "0010 UNREG_LOGIN vpi:%x "
2271 						 "rpi:%x DID:%x map:%x %p\n",
2272 						 vport->vpi, ndlp->nlp_rpi,
2273 						 ndlp->nlp_DID,
2274 						 ndlp->nlp_usg_map, ndlp);
2275 				ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2276 				lpfc_nlp_put(ndlp);
2277 			}
2278 		}
2279 	}
2280 
2281 	mempool_free(pmb, phba->mbox_mem_pool);
2282 }
2283 
2284 /**
2285  * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2286  * @phba: Pointer to HBA context object.
2287  *
2288  * This function is called with no lock held. This function processes all
2289  * the completed mailbox commands and gives them to the upper layers. The interrupt
2290  * service routine processes the mailbox completion interrupt and adds completed
2291  * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
2292  * The worker thread calls lpfc_sli_handle_mb_event, which will return the
2293  * completed mailbox commands in the mboxq_cmpl queue to the upper layers. This
2294  * function returns the mailbox commands to the upper layer by calling the
2295  * completion handler function of each mailbox.
2296  **/
2297 int
2298 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2299 {
2300 	MAILBOX_t *pmbox;
2301 	LPFC_MBOXQ_t *pmb;
2302 	int rc;
2303 	LIST_HEAD(cmplq);
2304 
2305 	phba->sli.slistat.mbox_event++;
2306 
2307 	/* Get all completed mailbox buffers into the cmplq */
2308 	spin_lock_irq(&phba->hbalock);
2309 	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2310 	spin_unlock_irq(&phba->hbalock);
2311 
2312 	/* Get a Mailbox buffer to set up mailbox commands for callback */
2313 	do {
2314 		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2315 		if (pmb == NULL)
2316 			break;
2317 
2318 		pmbox = &pmb->u.mb;
2319 
2320 		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2321 			if (pmb->vport) {
2322 				lpfc_debugfs_disc_trc(pmb->vport,
2323 					LPFC_DISC_TRC_MBOX_VPORT,
2324 					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2325 					(uint32_t)pmbox->mbxCommand,
2326 					pmbox->un.varWords[0],
2327 					pmbox->un.varWords[1]);
2328 			}
2329 			else {
2330 				lpfc_debugfs_disc_trc(phba->pport,
2331 					LPFC_DISC_TRC_MBOX,
2332 					"MBOX cmpl:       cmd:x%x mb:x%x x%x",
2333 					(uint32_t)pmbox->mbxCommand,
2334 					pmbox->un.varWords[0],
2335 					pmbox->un.varWords[1]);
2336 			}
2337 		}
2338 
2339 		/*
2340 		 * It is a fatal error if an unknown mbox command completion occurs.
2341 		 */
2342 		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2343 		    MBX_SHUTDOWN) {
2344 			/* Unknown mailbox command compl */
2345 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2346 					"(%d):0323 Unknown Mailbox command "
2347 					"x%x (x%x/x%x) Cmpl\n",
2348 					pmb->vport ? pmb->vport->vpi : 0,
2349 					pmbox->mbxCommand,
2350 					lpfc_sli_config_mbox_subsys_get(phba,
2351 									pmb),
2352 					lpfc_sli_config_mbox_opcode_get(phba,
2353 									pmb));
2354 			phba->link_state = LPFC_HBA_ERROR;
2355 			phba->work_hs = HS_FFER3;
2356 			lpfc_handle_eratt(phba);
2357 			continue;
2358 		}
2359 
2360 		if (pmbox->mbxStatus) {
2361 			phba->sli.slistat.mbox_stat_err++;
2362 			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2363 				/* Mbox cmd cmpl error - RETRYing */
2364 				lpfc_printf_log(phba, KERN_INFO,
2365 					LOG_MBOX | LOG_SLI,
2366 					"(%d):0305 Mbox cmd cmpl "
2367 					"error - RETRYing Data: x%x "
2368 					"(x%x/x%x) x%x x%x x%x\n",
2369 					pmb->vport ? pmb->vport->vpi : 0,
2370 					pmbox->mbxCommand,
2371 					lpfc_sli_config_mbox_subsys_get(phba,
2372 									pmb),
2373 					lpfc_sli_config_mbox_opcode_get(phba,
2374 									pmb),
2375 					pmbox->mbxStatus,
2376 					pmbox->un.varWords[0],
2377 					pmb->vport->port_state);
2378 				pmbox->mbxStatus = 0;
2379 				pmbox->mbxOwner = OWN_HOST;
2380 				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2381 				if (rc != MBX_NOT_FINISHED)
2382 					continue;
2383 			}
2384 		}
2385 
2386 		/* Mailbox cmd <cmd> Cmpl <cmpl> */
2387 		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2388 				"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
2389 				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2390 				"x%x x%x x%x\n",
2391 				pmb->vport ? pmb->vport->vpi : 0,
2392 				pmbox->mbxCommand,
2393 				lpfc_sli_config_mbox_subsys_get(phba, pmb),
2394 				lpfc_sli_config_mbox_opcode_get(phba, pmb),
2395 				pmb->mbox_cmpl,
2396 				*((uint32_t *) pmbox),
2397 				pmbox->un.varWords[0],
2398 				pmbox->un.varWords[1],
2399 				pmbox->un.varWords[2],
2400 				pmbox->un.varWords[3],
2401 				pmbox->un.varWords[4],
2402 				pmbox->un.varWords[5],
2403 				pmbox->un.varWords[6],
2404 				pmbox->un.varWords[7],
2405 				pmbox->un.varWords[8],
2406 				pmbox->un.varWords[9],
2407 				pmbox->un.varWords[10]);
2408 
2409 		if (pmb->mbox_cmpl)
2410 			pmb->mbox_cmpl(phba,pmb);
2411 	} while (1);
2412 	return 0;
2413 }
2414 
2415 /**
2416  * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2417  * @phba: Pointer to HBA context object.
2418  * @pring: Pointer to driver SLI ring object.
2419  * @tag: buffer tag.
2420  *
2421  * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
2422  * is set in the tag, the buffer was posted for a particular exchange and
2423  * the function will return the buffer without replacing it.
2424  * If the buffer is for unsolicited ELS or CT traffic, this function
2425  * returns the buffer and also posts another buffer to the firmware.
2426  **/
2427 static struct lpfc_dmabuf *
2428 lpfc_sli_get_buff(struct lpfc_hba *phba,
2429 		  struct lpfc_sli_ring *pring,
2430 		  uint32_t tag)
2431 {
2432 	struct hbq_dmabuf *hbq_entry;
2433 
2434 	if (tag & QUE_BUFTAG_BIT)
2435 		return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2436 	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2437 	if (!hbq_entry)
2438 		return NULL;
2439 	return &hbq_entry->dbuf;
2440 }
2441 
2442 /**
2443  * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2444  * @phba: Pointer to HBA context object.
2445  * @pring: Pointer to driver SLI ring object.
2446  * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2447  * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2448  * @fch_type: the type for the first frame of the sequence.
2449  *
2450  * This function is called with no lock held. This function uses the r_ctl and
2451  * type of the received sequence to find the correct callback function to call
2452  * to process the sequence.
2453  **/
2454 static int
2455 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2456 			 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2457 			 uint32_t fch_type)
2458 {
2459 	int i;
2460 
2461 	/* unSolicited Responses */
2462 	if (pring->prt[0].profile) {
2463 		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2464 			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2465 									saveq);
2466 		return 1;
2467 	}
2468 	/* We must search, based on rctl / type
2469 	   for the right routine */
2470 	for (i = 0; i < pring->num_mask; i++) {
2471 		if ((pring->prt[i].rctl == fch_r_ctl) &&
2472 		    (pring->prt[i].type == fch_type)) {
2473 			if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2474 				(pring->prt[i].lpfc_sli_rcv_unsol_event)
2475 						(phba, pring, saveq);
2476 			return 1;
2477 		}
2478 	}
2479 	return 0;
2480 }
2481 
2482 /**
2483  * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
2484  * @phba: Pointer to HBA context object.
2485  * @pring: Pointer to driver SLI ring object.
2486  * @saveq: Pointer to the unsolicited iocb.
2487  *
2488  * This function is called with no lock held by the ring event handler
2489  * when there is an unsolicited iocb posted to the response ring by the
2490  * firmware. This function gets the buffer associated with the iocbs
2491  * and calls the event handler for the ring. This function handles both
2492  * qring buffers and hbq buffers.
2493  * When the function returns 1, the caller can free the iocb object; otherwise
2494  * the upper layer functions will free the iocb object.
2495  **/
2496 static int
2497 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2498 			    struct lpfc_iocbq *saveq)
2499 {
2500 	IOCB_t           * irsp;
2501 	WORD5            * w5p;
2502 	uint32_t           Rctl, Type;
2503 	struct lpfc_iocbq *iocbq;
2504 	struct lpfc_dmabuf *dmzbuf;
2505 
2506 	irsp = &(saveq->iocb);
2507 
2508 	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2509 		if (pring->lpfc_sli_rcv_async_status)
2510 			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2511 		else
2512 			lpfc_printf_log(phba,
2513 					KERN_WARNING,
2514 					LOG_SLI,
2515 					"0316 Ring %d handler: unexpected "
2516 					"ASYNC_STATUS iocb received evt_code "
2517 					"0x%x\n",
2518 					pring->ringno,
2519 					irsp->un.asyncstat.evt_code);
2520 		return 1;
2521 	}
2522 
2523 	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2524 		(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2525 		if (irsp->ulpBdeCount > 0) {
2526 			dmzbuf = lpfc_sli_get_buff(phba, pring,
2527 					irsp->un.ulpWord[3]);
2528 			lpfc_in_buf_free(phba, dmzbuf);
2529 		}
2530 
2531 		if (irsp->ulpBdeCount > 1) {
2532 			dmzbuf = lpfc_sli_get_buff(phba, pring,
2533 					irsp->unsli3.sli3Words[3]);
2534 			lpfc_in_buf_free(phba, dmzbuf);
2535 		}
2536 
2537 		if (irsp->ulpBdeCount > 2) {
2538 			dmzbuf = lpfc_sli_get_buff(phba, pring,
2539 				irsp->unsli3.sli3Words[7]);
2540 			lpfc_in_buf_free(phba, dmzbuf);
2541 		}
2542 
2543 		return 1;
2544 	}
2545 
2546 	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2547 		if (irsp->ulpBdeCount != 0) {
2548 			saveq->context2 = lpfc_sli_get_buff(phba, pring,
2549 						irsp->un.ulpWord[3]);
2550 			if (!saveq->context2)
2551 				lpfc_printf_log(phba,
2552 					KERN_ERR,
2553 					LOG_SLI,
2554 					"0341 Ring %d Cannot find buffer for "
2555 					"an unsolicited iocb. tag 0x%x\n",
2556 					pring->ringno,
2557 					irsp->un.ulpWord[3]);
2558 		}
2559 		if (irsp->ulpBdeCount == 2) {
2560 			saveq->context3 = lpfc_sli_get_buff(phba, pring,
2561 						irsp->unsli3.sli3Words[7]);
2562 			if (!saveq->context3)
2563 				lpfc_printf_log(phba,
2564 					KERN_ERR,
2565 					LOG_SLI,
2566 					"0342 Ring %d Cannot find buffer for an"
2567 					" unsolicited iocb. tag 0x%x\n",
2568 					pring->ringno,
2569 					irsp->unsli3.sli3Words[7]);
2570 		}
2571 		list_for_each_entry(iocbq, &saveq->list, list) {
2572 			irsp = &(iocbq->iocb);
2573 			if (irsp->ulpBdeCount != 0) {
2574 				iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2575 							irsp->un.ulpWord[3]);
2576 				if (!iocbq->context2)
2577 					lpfc_printf_log(phba,
2578 						KERN_ERR,
2579 						LOG_SLI,
2580 						"0343 Ring %d Cannot find "
2581 						"buffer for an unsolicited iocb"
2582 						". tag 0x%x\n", pring->ringno,
2583 						irsp->un.ulpWord[3]);
2584 			}
2585 			if (irsp->ulpBdeCount == 2) {
2586 				iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2587 						irsp->unsli3.sli3Words[7]);
2588 				if (!iocbq->context3)
2589 					lpfc_printf_log(phba,
2590 						KERN_ERR,
2591 						LOG_SLI,
2592 						"0344 Ring %d Cannot find "
2593 						"buffer for an unsolicited "
2594 						"iocb. tag 0x%x\n",
2595 						pring->ringno,
2596 						irsp->unsli3.sli3Words[7]);
2597 			}
2598 		}
2599 	}
2600 	if (irsp->ulpBdeCount != 0 &&
2601 	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2602 	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2603 		int found = 0;
2604 
2605 		/* search continue save q for same XRI */
2606 		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2607 			if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2608 				saveq->iocb.unsli3.rcvsli3.ox_id) {
2609 				list_add_tail(&saveq->list, &iocbq->list);
2610 				found = 1;
2611 				break;
2612 			}
2613 		}
2614 		if (!found)
2615 			list_add_tail(&saveq->clist,
2616 				      &pring->iocb_continue_saveq);
2617 		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2618 			list_del_init(&iocbq->clist);
2619 			saveq = iocbq;
2620 			irsp = &(saveq->iocb);
2621 		} else
2622 			return 0;
2623 	}
2624 	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2625 	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2626 	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
2627 		Rctl = FC_RCTL_ELS_REQ;
2628 		Type = FC_TYPE_ELS;
2629 	} else {
2630 		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2631 		Rctl = w5p->hcsw.Rctl;
2632 		Type = w5p->hcsw.Type;
2633 
2634 		/* Firmware Workaround */
2635 		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2636 			(irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2637 			 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
2638 			Rctl = FC_RCTL_ELS_REQ;
2639 			Type = FC_TYPE_ELS;
2640 			w5p->hcsw.Rctl = Rctl;
2641 			w5p->hcsw.Type = Type;
2642 		}
2643 	}
2644 
2645 	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
2646 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2647 				"0313 Ring %d handler: unexpected Rctl x%x "
2648 				"Type x%x received\n",
2649 				pring->ringno, Rctl, Type);
2650 
2651 	return 1;
2652 }
2653 
2654 /**
2655  * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
2656  * @phba: Pointer to HBA context object.
2657  * @pring: Pointer to driver SLI ring object.
2658  * @prspiocb: Pointer to response iocb object.
2659  *
2660  * This function looks up the iocb_lookup table to get the command iocb
2661  * corresponding to the given response iocb using the iotag of the
2662  * response iocb. This function is called with the hbalock held.
2663  * This function returns the command iocb object if it finds the command
2664  * iocb else returns NULL.
2665  **/
2666 static struct lpfc_iocbq *
2667 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2668 		      struct lpfc_sli_ring *pring,
2669 		      struct lpfc_iocbq *prspiocb)
2670 {
2671 	struct lpfc_iocbq *cmd_iocb = NULL;
2672 	uint16_t iotag;
2673 	lockdep_assert_held(&phba->hbalock);
2674 
2675 	iotag = prspiocb->iocb.ulpIoTag;
2676 
2677 	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2678 		cmd_iocb = phba->sli.iocbq_lookup[iotag];
2679 		list_del_init(&cmd_iocb->list);
2680 		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2681 			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2682 		}
2683 		return cmd_iocb;
2684 	}
2685 
2686 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2687 			"0317 iotag x%x is out off "
2688 			"range: max iotag x%x wd0 x%x\n",
2689 			iotag, phba->sli.last_iotag,
2690 			*(((uint32_t *) &prspiocb->iocb) + 7));
2691 	return NULL;
2692 }
2693 
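/*
 * Illustration only (not part of the driver): callers must hold hbalock
 * around the lookup, as lpfc_sli_process_sol_iocb() below does:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflag);
 *	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
 *	spin_unlock_irqrestore(&phba->hbalock, iflag);
 */
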
2694 /**
2695  * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2696  * @phba: Pointer to HBA context object.
2697  * @pring: Pointer to driver SLI ring object.
2698  * @iotag: IOCB tag.
2699  *
2700  * This function looks up the iocb_lookup table to get the command iocb
2701  * corresponding to the given iotag. This function is called with the
2702  * hbalock held.
2703  * This function returns the command iocb object if it finds the command
2704  * iocb else returns NULL.
2705  **/
2706 static struct lpfc_iocbq *
2707 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2708 			     struct lpfc_sli_ring *pring, uint16_t iotag)
2709 {
2710 	struct lpfc_iocbq *cmd_iocb;
2711 
2712 	lockdep_assert_held(&phba->hbalock);
2713 	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2714 		cmd_iocb = phba->sli.iocbq_lookup[iotag];
2715 		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2716 			/* remove from txcmpl queue list */
2717 			list_del_init(&cmd_iocb->list);
2718 			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2719 			return cmd_iocb;
2720 		}
2721 	}
2722 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2723 			"0372 iotag x%x is out off range: max iotag (x%x)\n",
2724 			iotag, phba->sli.last_iotag);
2725 	return NULL;
2726 }
2727 
2728 /**
2729  * lpfc_sli_process_sol_iocb - process solicited iocb completion
2730  * @phba: Pointer to HBA context object.
2731  * @pring: Pointer to driver SLI ring object.
2732  * @saveq: Pointer to the response iocb to be processed.
2733  *
2734  * This function is called by the ring event handler for non-fcp
2735  * rings when there is a new response iocb in the response ring.
2736  * The caller is not required to hold any locks. This function
2737  * gets the command iocb associated with the response iocb and
2738  * calls the completion handler for the command iocb. If there
2739  * is no completion handler, the function will free the resources
2740  * associated with command iocb. If the response iocb is for
2741  * an already aborted command iocb, the status of the completion
2742  * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
2743  * This function always returns 1.
2744  **/
2745 static int
2746 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2747 			  struct lpfc_iocbq *saveq)
2748 {
2749 	struct lpfc_iocbq *cmdiocbp;
2750 	int rc = 1;
2751 	unsigned long iflag;
2752 
2753 	/* Based on the iotag field, get the cmd IOCB from the txcmplq */
2754 	spin_lock_irqsave(&phba->hbalock, iflag);
2755 	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
2756 	spin_unlock_irqrestore(&phba->hbalock, iflag);
2757 
2758 	if (cmdiocbp) {
2759 		if (cmdiocbp->iocb_cmpl) {
2760 			/*
2761 			 * If an ELS command failed send an event to mgmt
2762 			 * application.
2763 			 */
2764 			if (saveq->iocb.ulpStatus &&
2765 			     (pring->ringno == LPFC_ELS_RING) &&
2766 			     (cmdiocbp->iocb.ulpCommand ==
2767 				CMD_ELS_REQUEST64_CR))
2768 				lpfc_send_els_failure_event(phba,
2769 					cmdiocbp, saveq);
2770 
2771 			/*
2772 			 * Post all ELS completions to the worker thread.
2773 			 * All others are passed to the completion callback.
2774 			 */
2775 			if (pring->ringno == LPFC_ELS_RING) {
2776 				if ((phba->sli_rev < LPFC_SLI_REV4) &&
2777 				    (cmdiocbp->iocb_flag &
2778 							LPFC_DRIVER_ABORTED)) {
2779 					spin_lock_irqsave(&phba->hbalock,
2780 							  iflag);
2781 					cmdiocbp->iocb_flag &=
2782 						~LPFC_DRIVER_ABORTED;
2783 					spin_unlock_irqrestore(&phba->hbalock,
2784 							       iflag);
2785 					saveq->iocb.ulpStatus =
2786 						IOSTAT_LOCAL_REJECT;
2787 					saveq->iocb.un.ulpWord[4] =
2788 						IOERR_SLI_ABORTED;
2789 
2790 					/* Firmware could still be in progress
2791 					 * of DMAing payload, so don't free data
2792 					 * buffer till after a hbeat.
2793 					 */
2794 					spin_lock_irqsave(&phba->hbalock,
2795 							  iflag);
2796 					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
2797 					spin_unlock_irqrestore(&phba->hbalock,
2798 							       iflag);
2799 				}
2800 				if (phba->sli_rev == LPFC_SLI_REV4) {
2801 					if (saveq->iocb_flag &
2802 					    LPFC_EXCHANGE_BUSY) {
2803 						/* Set cmdiocb flag for the
2804 						 * exchange busy so sgl (xri)
2805 						 * will not be released until
2806 						 * the abort xri is received
2807 						 * from hba.
2808 						 */
2809 						spin_lock_irqsave(
2810 							&phba->hbalock, iflag);
2811 						cmdiocbp->iocb_flag |=
2812 							LPFC_EXCHANGE_BUSY;
2813 						spin_unlock_irqrestore(
2814 							&phba->hbalock, iflag);
2815 					}
2816 					if (cmdiocbp->iocb_flag &
2817 					    LPFC_DRIVER_ABORTED) {
2818 						/*
2819 						 * Clear LPFC_DRIVER_ABORTED
2820 						 * bit in case it was driver
2821 						 * initiated abort.
2822 						 */
2823 						spin_lock_irqsave(
2824 							&phba->hbalock, iflag);
2825 						cmdiocbp->iocb_flag &=
2826 							~LPFC_DRIVER_ABORTED;
2827 						spin_unlock_irqrestore(
2828 							&phba->hbalock, iflag);
2829 						cmdiocbp->iocb.ulpStatus =
2830 							IOSTAT_LOCAL_REJECT;
2831 						cmdiocbp->iocb.un.ulpWord[4] =
2832 							IOERR_ABORT_REQUESTED;
2833 						/*
2834 						 * For SLI4, the response iocb
2835 						 * contains NO_XRI in
2836 						 * sli_xritag; it shall not
2837 						 * affect the sgl (xri)
2838 						 * release process.
2838 						 */
2839 						saveq->iocb.ulpStatus =
2840 							IOSTAT_LOCAL_REJECT;
2841 						saveq->iocb.un.ulpWord[4] =
2842 							IOERR_SLI_ABORTED;
2843 						spin_lock_irqsave(
2844 							&phba->hbalock, iflag);
2845 						saveq->iocb_flag |=
2846 							LPFC_DELAY_MEM_FREE;
2847 						spin_unlock_irqrestore(
2848 							&phba->hbalock, iflag);
2849 					}
2850 				}
2851 			}
2852 			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
2853 		} else
2854 			lpfc_sli_release_iocbq(phba, cmdiocbp);
2855 	} else {
2856 		/*
2857 		 * Unknown initiating command based on the response iotag.
2858 		 * This could be the case on the ELS ring because of
2859 		 * lpfc_els_abort().
2860 		 */
2861 		if (pring->ringno != LPFC_ELS_RING) {
2862 			/*
2863 			 * Ring <ringno> handler: unexpected completion IoTag
2864 			 * <IoTag>
2865 			 */
2866 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2867 					 "0322 Ring %d handler: "
2868 					 "unexpected completion IoTag x%x "
2869 					 "Data: x%x x%x x%x x%x\n",
2870 					 pring->ringno,
2871 					 saveq->iocb.ulpIoTag,
2872 					 saveq->iocb.ulpStatus,
2873 					 saveq->iocb.un.ulpWord[4],
2874 					 saveq->iocb.ulpCommand,
2875 					 saveq->iocb.ulpContext);
2876 		}
2877 	}
2878 
2879 	return rc;
2880 }
2881 
2882 /**
2883  * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
2884  * @phba: Pointer to HBA context object.
2885  * @pring: Pointer to driver SLI ring object.
2886  *
2887  * This function is called from the iocb ring event handlers when
2888  * the put pointer is ahead of the get pointer for a ring. This function signals
2889  * an error attention condition to the worker thread, and the worker
2890  * thread will transition the HBA to the offline state.
2891  **/
2892 static void
2893 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2894 {
2895 	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2896 	/*
2897 	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
2898 	 * rsp ring <portRspMax>
2899 	 */
2900 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2901 			"0312 Ring %d handler: portRspPut %d "
2902 			"is bigger than rsp ring %d\n",
2903 			pring->ringno, le32_to_cpu(pgp->rspPutInx),
2904 			pring->sli.sli3.numRiocb);
2905 
2906 	phba->link_state = LPFC_HBA_ERROR;
2907 
2908 	/*
2909 	 * All error attention handlers are posted to
2910 	 * worker thread
2911 	 */
2912 	phba->work_ha |= HA_ERATT;
2913 	phba->work_hs = HS_FFER3;
2914 
2915 	lpfc_worker_wake_up(phba);
2916 
2917 	return;
2918 }
2919 
2920 /**
2921  * lpfc_poll_eratt - Error attention polling timer timeout handler
2922  * @ptr: Pointer to address of HBA context object.
2923  *
2924  * This function is invoked by the Error Attention polling timer when the
2925  * timer times out. It will check the SLI Error Attention register for
2926  * possible attention events. If so, it will post an Error Attention event
2927  * and wake up worker thread to process it. Otherwise, it will set up the
2928  * Error Attention polling timer for the next poll.
2929  **/
2930 void lpfc_poll_eratt(unsigned long ptr)
2931 {
2932 	struct lpfc_hba *phba;
2933 	uint32_t eratt = 0;
2934 	uint64_t sli_intr, cnt;
2935 
2936 	phba = (struct lpfc_hba *)ptr;
2937 
2938 	/* Here we will also keep track of interrupts per sec of the hba */
2939 	sli_intr = phba->sli.slistat.sli_intr;
2940 
2941 	if (phba->sli.slistat.sli_prev_intr > sli_intr)
2942 		cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
2943 			sli_intr);
2944 	else
2945 		cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
2946 
2947 	/* 64-bit integer division not supported on 32-bit x86 - use do_div */
2948 	do_div(cnt, phba->eratt_poll_interval);
2949 	phba->sli.slistat.sli_ips = cnt;
2950 
2951 	phba->sli.slistat.sli_prev_intr = sli_intr;
2952 
2953 	/* Check chip HA register for error event */
2954 	eratt = lpfc_sli_check_eratt(phba);
2955 
2956 	if (eratt)
2957 		/* Tell the worker thread there is work to do */
2958 		lpfc_worker_wake_up(phba);
2959 	else
2960 		/* Restart the timer for next eratt poll */
2961 		mod_timer(&phba->eratt_poll,
2962 			  jiffies +
2963 			  msecs_to_jiffies(1000 * phba->eratt_poll_interval));
2964 	return;
2965 }
2966 
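/*
 * Illustration only (not part of the driver): the interrupts-per-second
 * bookkeeping above first handles a wrap of the 64-bit interrupt counter and
 * then uses do_div(), since plain 64-bit division is unavailable on some
 * 32-bit targets.  Sketch with hypothetical prev/curr/delta variables:
 *
 *	if (prev > curr)
 *		delta = ((uint64_t)(-1) - prev) + curr;
 *	else
 *		delta = curr - prev;
 *	do_div(delta, poll_interval);
 */
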
2967 
2968 /**
2969  * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
2970  * @phba: Pointer to HBA context object.
2971  * @pring: Pointer to driver SLI ring object.
2972  * @mask: Host attention register mask for this ring.
2973  *
2974  * This function is called from the interrupt context when there is a ring
2975  * event for the fcp ring. The caller does not hold any lock.
2976  * The function processes each response iocb in the response ring until it
2977  * finds an iocb with the LE bit set and chains all the iocbs up to that iocb.
2978  * The function will call the completion handler of the command iocb
2979  * if the response iocb indicates a completion for a command iocb or it is
2980  * an abort completion. The function will call lpfc_sli_process_unsol_iocb
2981  * function if this is an unsolicited iocb.
2982  * This routine presumes LPFC_FCP_RING handling and doesn't bother
2983  * to check it explicitly.
2984  */
2985 int
2986 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2987 				struct lpfc_sli_ring *pring, uint32_t mask)
2988 {
2989 	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2990 	IOCB_t *irsp = NULL;
2991 	IOCB_t *entry = NULL;
2992 	struct lpfc_iocbq *cmdiocbq = NULL;
2993 	struct lpfc_iocbq rspiocbq;
2994 	uint32_t status;
2995 	uint32_t portRspPut, portRspMax;
2996 	int rc = 1;
2997 	lpfc_iocb_type type;
2998 	unsigned long iflag;
2999 	uint32_t rsp_cmpl = 0;
3000 
3001 	spin_lock_irqsave(&phba->hbalock, iflag);
3002 	pring->stats.iocb_event++;
3003 
3004 	/*
3005 	 * The next available response entry should never exceed the maximum
3006 	 * entries.  If it does, treat it as an adapter hardware error.
3007 	 */
3008 	portRspMax = pring->sli.sli3.numRiocb;
3009 	portRspPut = le32_to_cpu(pgp->rspPutInx);
3010 	if (unlikely(portRspPut >= portRspMax)) {
3011 		lpfc_sli_rsp_pointers_error(phba, pring);
3012 		spin_unlock_irqrestore(&phba->hbalock, iflag);
3013 		return 1;
3014 	}
3015 	if (phba->fcp_ring_in_use) {
3016 		spin_unlock_irqrestore(&phba->hbalock, iflag);
3017 		return 1;
3018 	} else
3019 		phba->fcp_ring_in_use = 1;
3020 
3021 	rmb();
3022 	while (pring->sli.sli3.rspidx != portRspPut) {
3023 		/*
3024 		 * Fetch an entry off the ring and copy it into a local data
3025 		 * structure.  The copy involves a byte-swap since the
3026 		 * network byte order and pci byte orders are different.
3027 		 */
3028 		entry = lpfc_resp_iocb(phba, pring);
3029 		phba->last_completion_time = jiffies;
3030 
3031 		if (++pring->sli.sli3.rspidx >= portRspMax)
3032 			pring->sli.sli3.rspidx = 0;
3033 
3034 		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3035 				      (uint32_t *) &rspiocbq.iocb,
3036 				      phba->iocb_rsp_size);
3037 		INIT_LIST_HEAD(&(rspiocbq.list));
3038 		irsp = &rspiocbq.iocb;
3039 
3040 		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3041 		pring->stats.iocb_rsp++;
3042 		rsp_cmpl++;
3043 
3044 		if (unlikely(irsp->ulpStatus)) {
3045 			/*
3046 			 * If resource errors are reported from the HBA, reduce
3047 			 * the queue depths of the SCSI devices.
3048 			 */
3049 			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3050 			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3051 			     IOERR_NO_RESOURCES)) {
3052 				spin_unlock_irqrestore(&phba->hbalock, iflag);
3053 				phba->lpfc_rampdown_queue_depth(phba);
3054 				spin_lock_irqsave(&phba->hbalock, iflag);
3055 			}
3056 
3057 			/* Rsp ring <ringno> error: IOCB */
3058 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3059 					"0336 Rsp Ring %d error: IOCB Data: "
3060 					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
3061 					pring->ringno,
3062 					irsp->un.ulpWord[0],
3063 					irsp->un.ulpWord[1],
3064 					irsp->un.ulpWord[2],
3065 					irsp->un.ulpWord[3],
3066 					irsp->un.ulpWord[4],
3067 					irsp->un.ulpWord[5],
3068 					*(uint32_t *)&irsp->un1,
3069 					*((uint32_t *)&irsp->un1 + 1));
3070 		}
3071 
3072 		switch (type) {
3073 		case LPFC_ABORT_IOCB:
3074 		case LPFC_SOL_IOCB:
3075 			/*
3076 			 * Idle exchange closed via ABTS from port.  No iocb
3077 			 * resources need to be recovered.
3078 			 */
3079 			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3080 				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3081 						"0333 IOCB cmd 0x%x"
3082 						" processed. Skipping"
3083 						" completion\n",
3084 						irsp->ulpCommand);
3085 				break;
3086 			}
3087 
3088 			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3089 							 &rspiocbq);
3090 			if (unlikely(!cmdiocbq))
3091 				break;
3092 			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3093 				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3094 			if (cmdiocbq->iocb_cmpl) {
3095 				spin_unlock_irqrestore(&phba->hbalock, iflag);
3096 				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3097 						      &rspiocbq);
3098 				spin_lock_irqsave(&phba->hbalock, iflag);
3099 			}
3100 			break;
3101 		case LPFC_UNSOL_IOCB:
3102 			spin_unlock_irqrestore(&phba->hbalock, iflag);
3103 			lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3104 			spin_lock_irqsave(&phba->hbalock, iflag);
3105 			break;
3106 		default:
3107 			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3108 				char adaptermsg[LPFC_MAX_ADPTMSG];
3109 				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3110 				memcpy(&adaptermsg[0], (uint8_t *) irsp,
3111 				       MAX_MSG_DATA);
3112 				dev_warn(&((phba->pcidev)->dev),
3113 					 "lpfc%d: %s\n",
3114 					 phba->brd_no, adaptermsg);
3115 			} else {
3116 				/* Unknown IOCB command */
3117 				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3118 						"0334 Unknown IOCB command "
3119 						"Data: x%x, x%x x%x x%x x%x\n",
3120 						type, irsp->ulpCommand,
3121 						irsp->ulpStatus,
3122 						irsp->ulpIoTag,
3123 						irsp->ulpContext);
3124 			}
3125 			break;
3126 		}
3127 
3128 		/*
3129 		 * The response IOCB has been processed.  Update the ring
3130 		 * pointer in SLIM.  If the port response put pointer has not
3131 		 * been updated, sync the pgp->rspPutInx and fetch the new port
3132 		 * response put pointer.
3133 		 */
3134 		writel(pring->sli.sli3.rspidx,
3135 			&phba->host_gp[pring->ringno].rspGetInx);
3136 
3137 		if (pring->sli.sli3.rspidx == portRspPut)
3138 			portRspPut = le32_to_cpu(pgp->rspPutInx);
3139 	}
3140 
3141 	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3142 		pring->stats.iocb_rsp_full++;
3143 		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3144 		writel(status, phba->CAregaddr);
3145 		readl(phba->CAregaddr);
3146 	}
3147 	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3148 		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3149 		pring->stats.iocb_cmd_empty++;
3150 
3151 		/* Force update of the local copy of cmdGetInx */
3152 		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3153 		lpfc_sli_resume_iocb(phba, pring);
3154 
3155 		if ((pring->lpfc_sli_cmd_available))
3156 			(pring->lpfc_sli_cmd_available) (phba, pring);
3157 
3158 	}
3159 
3160 	phba->fcp_ring_in_use = 0;
3161 	spin_unlock_irqrestore(&phba->hbalock, iflag);
3162 	return rc;
3163 }
3164 
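/*
 * Illustration only (not part of the driver): each response entry is copied
 * out of the ring into a local iocb (with the byte swap done by
 * lpfc_sli_pcimem_bcopy()) before it is interpreted, as the fast-path loop
 * above does:
 *
 *	entry = lpfc_resp_iocb(phba, pring);
 *	lpfc_sli_pcimem_bcopy((uint32_t *)entry, (uint32_t *)&rspiocbq.iocb,
 *			      phba->iocb_rsp_size);
 *	irsp = &rspiocbq.iocb;
 */
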
3165 /**
3166  * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3167  * @phba: Pointer to HBA context object.
3168  * @pring: Pointer to driver SLI ring object.
3169  * @rspiocbp: Pointer to driver response IOCB object.
3170  *
3171  * This function is called from the worker thread when there is a slow-path
3172  * response IOCB to process. This function chains all the response iocbs until
3173  * seeing the iocb with the LE bit set. The function will call
3174  * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3175  * completion of a command iocb. The function will call the
3176  * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3177  * The function frees the resources or calls the completion handler if this
3178  * iocb is an abort completion. The function returns NULL when the response
3179  * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3180  * this function shall chain the iocb on to the iocb_continueq and return the
3181  * response iocb passed in.
3182  **/
3183 static struct lpfc_iocbq *
3184 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3185 			struct lpfc_iocbq *rspiocbp)
3186 {
3187 	struct lpfc_iocbq *saveq;
3188 	struct lpfc_iocbq *cmdiocbp;
3189 	struct lpfc_iocbq *next_iocb;
3190 	IOCB_t *irsp = NULL;
3191 	uint32_t free_saveq;
3192 	uint8_t iocb_cmd_type;
3193 	lpfc_iocb_type type;
3194 	unsigned long iflag;
3195 	int rc;
3196 
3197 	spin_lock_irqsave(&phba->hbalock, iflag);
3198 	/* First add the response iocb to the continueq list */
3199 	list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3200 	pring->iocb_continueq_cnt++;
3201 
3202 	/* Now, determine whether the list is completed for processing */
3203 	irsp = &rspiocbp->iocb;
3204 	if (irsp->ulpLe) {
3205 		/*
3206 		 * By default, the driver expects to free all resources
3207 		 * associated with this iocb completion.
3208 		 */
3209 		free_saveq = 1;
3210 		saveq = list_get_first(&pring->iocb_continueq,
3211 				       struct lpfc_iocbq, list);
3212 		irsp = &(saveq->iocb);
3213 		list_del_init(&pring->iocb_continueq);
3214 		pring->iocb_continueq_cnt = 0;
3215 
3216 		pring->stats.iocb_rsp++;
3217 
3218 		/*
3219 		 * If resource errors are reported from the HBA, reduce
3220 		 * the queue depths of the SCSI devices.
3221 		 */
3222 		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3223 		    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3224 		     IOERR_NO_RESOURCES)) {
3225 			spin_unlock_irqrestore(&phba->hbalock, iflag);
3226 			phba->lpfc_rampdown_queue_depth(phba);
3227 			spin_lock_irqsave(&phba->hbalock, iflag);
3228 		}
3229 
3230 		if (irsp->ulpStatus) {
3231 			/* Rsp ring <ringno> error: IOCB */
3232 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3233 					"0328 Rsp Ring %d error: "
3234 					"IOCB Data: "
3235 					"x%x x%x x%x x%x "
3236 					"x%x x%x x%x x%x "
3237 					"x%x x%x x%x x%x "
3238 					"x%x x%x x%x x%x\n",
3239 					pring->ringno,
3240 					irsp->un.ulpWord[0],
3241 					irsp->un.ulpWord[1],
3242 					irsp->un.ulpWord[2],
3243 					irsp->un.ulpWord[3],
3244 					irsp->un.ulpWord[4],
3245 					irsp->un.ulpWord[5],
3246 					*(((uint32_t *) irsp) + 6),
3247 					*(((uint32_t *) irsp) + 7),
3248 					*(((uint32_t *) irsp) + 8),
3249 					*(((uint32_t *) irsp) + 9),
3250 					*(((uint32_t *) irsp) + 10),
3251 					*(((uint32_t *) irsp) + 11),
3252 					*(((uint32_t *) irsp) + 12),
3253 					*(((uint32_t *) irsp) + 13),
3254 					*(((uint32_t *) irsp) + 14),
3255 					*(((uint32_t *) irsp) + 15));
3256 		}
3257 
3258 		/*
3259 		 * Fetch the IOCB command type and call the correct completion
3260 		 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3261 		 * get freed back to the lpfc_iocb_list by the discovery
3262 		 * kernel thread.
3263 		 */
3264 		iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3265 		type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3266 		switch (type) {
3267 		case LPFC_SOL_IOCB:
3268 			spin_unlock_irqrestore(&phba->hbalock, iflag);
3269 			rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3270 			spin_lock_irqsave(&phba->hbalock, iflag);
3271 			break;
3272 
3273 		case LPFC_UNSOL_IOCB:
3274 			spin_unlock_irqrestore(&phba->hbalock, iflag);
3275 			rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3276 			spin_lock_irqsave(&phba->hbalock, iflag);
3277 			if (!rc)
3278 				free_saveq = 0;
3279 			break;
3280 
3281 		case LPFC_ABORT_IOCB:
3282 			cmdiocbp = NULL;
3283 			if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3284 				cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3285 								 saveq);
3286 			if (cmdiocbp) {
3287 				/* Call the specified completion routine */
3288 				if (cmdiocbp->iocb_cmpl) {
3289 					spin_unlock_irqrestore(&phba->hbalock,
3290 							       iflag);
3291 					(cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3292 							      saveq);
3293 					spin_lock_irqsave(&phba->hbalock,
3294 							  iflag);
3295 				} else
3296 					__lpfc_sli_release_iocbq(phba,
3297 								 cmdiocbp);
3298 			}
3299 			break;
3300 
3301 		case LPFC_UNKNOWN_IOCB:
3302 			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3303 				char adaptermsg[LPFC_MAX_ADPTMSG];
3304 				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3305 				memcpy(&adaptermsg[0], (uint8_t *)irsp,
3306 				       MAX_MSG_DATA);
3307 				dev_warn(&((phba->pcidev)->dev),
3308 					 "lpfc%d: %s\n",
3309 					 phba->brd_no, adaptermsg);
3310 			} else {
3311 				/* Unknown IOCB command */
3312 				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3313 						"0335 Unknown IOCB "
3314 						"command Data: x%x "
3315 						"x%x x%x x%x\n",
3316 						irsp->ulpCommand,
3317 						irsp->ulpStatus,
3318 						irsp->ulpIoTag,
3319 						irsp->ulpContext);
3320 			}
3321 			break;
3322 		}
3323 
3324 		if (free_saveq) {
3325 			list_for_each_entry_safe(rspiocbp, next_iocb,
3326 						 &saveq->list, list) {
3327 				list_del_init(&rspiocbp->list);
3328 				__lpfc_sli_release_iocbq(phba, rspiocbp);
3329 			}
3330 			__lpfc_sli_release_iocbq(phba, saveq);
3331 		}
3332 		rspiocbp = NULL;
3333 	}
3334 	spin_unlock_irqrestore(&phba->hbalock, iflag);
3335 	return rspiocbp;
3336 }
3337 
3338 /**
3339  * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
3340  * @phba: Pointer to HBA context object.
3341  * @pring: Pointer to driver SLI ring object.
3342  * @mask: Host attention register mask for this ring.
3343  *
3344  * This routine wraps the actual slow_ring event processing routine, invoked
3345  * through the API jump table function pointer in the lpfc_hba struct.
3346  **/
3347 void
3348 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3349 				struct lpfc_sli_ring *pring, uint32_t mask)
3350 {
3351 	phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3352 }
3353 
3354 /**
3355  * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3356  * @phba: Pointer to HBA context object.
3357  * @pring: Pointer to driver SLI ring object.
3358  * @mask: Host attention register mask for this ring.
3359  *
3360  * This function is called from the worker thread when there is a ring event
3361  * for non-FCP rings. The caller does not hold any lock. The function
3362  * removes each response iocb from the response ring and calls the handle
3363  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3364  **/
3365 static void
3366 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3367 				   struct lpfc_sli_ring *pring, uint32_t mask)
3368 {
3369 	struct lpfc_pgp *pgp;
3370 	IOCB_t *entry;
3371 	IOCB_t *irsp = NULL;
3372 	struct lpfc_iocbq *rspiocbp = NULL;
3373 	uint32_t portRspPut, portRspMax;
3374 	unsigned long iflag;
3375 	uint32_t status;
3376 
3377 	pgp = &phba->port_gp[pring->ringno];
3378 	spin_lock_irqsave(&phba->hbalock, iflag);
3379 	pring->stats.iocb_event++;
3380 
3381 	/*
3382 	 * The next available response entry should never exceed the maximum
3383 	 * entries.  If it does, treat it as an adapter hardware error.
3384 	 */
3385 	portRspMax = pring->sli.sli3.numRiocb;
3386 	portRspPut = le32_to_cpu(pgp->rspPutInx);
3387 	if (portRspPut >= portRspMax) {
3388 		/*
3389 		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3390 		 * rsp ring <portRspMax>
3391 		 */
3392 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3393 				"0303 Ring %d handler: portRspPut %d "
3394 				"is bigger than rsp ring %d\n",
3395 				pring->ringno, portRspPut, portRspMax);
3396 
3397 		phba->link_state = LPFC_HBA_ERROR;
3398 		spin_unlock_irqrestore(&phba->hbalock, iflag);
3399 
3400 		phba->work_hs = HS_FFER3;
3401 		lpfc_handle_eratt(phba);
3402 
3403 		return;
3404 	}
3405 
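		/*
		 * Order the read of the port's rspPutInx above before the
		 * reads of the response entries consumed in the loop below.
		 */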
3406 	rmb();
3407 	while (pring->sli.sli3.rspidx != portRspPut) {
3408 		/*
3409 		 * Build a completion list and call the appropriate handler.
3410 		 * The process is to get the next available response iocb, get
3411 		 * a free iocb from the list, copy the response data into the
3412 		 * free iocb, insert it into the continuation list, and update the
3413 		 * next response index to SLIM.  This process makes response
3414 		 * iocbs in the ring available to DMA as fast as possible but
3415 		 * pays a penalty for a copy operation.  Since the iocb is
3416 		 * only 32 bytes, this penalty is considered small relative to
3417 		 * the PCI reads for register values and a slim write.  When
3418 		 * the ulpLe field is set, the entire Command has been
3419 		 * received.
3420 		 */
3421 		entry = lpfc_resp_iocb(phba, pring);
3422 
3423 		phba->last_completion_time = jiffies;
3424 		rspiocbp = __lpfc_sli_get_iocbq(phba);
3425 		if (rspiocbp == NULL) {
3426 			printk(KERN_ERR "%s: out of buffers! Failing "
3427 			       "completion.\n", __func__);
3428 			break;
3429 		}
3430 
3431 		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3432 				      phba->iocb_rsp_size);
3433 		irsp = &rspiocbp->iocb;
3434 
3435 		if (++pring->sli.sli3.rspidx >= portRspMax)
3436 			pring->sli.sli3.rspidx = 0;
3437 
3438 		if (pring->ringno == LPFC_ELS_RING) {
3439 			lpfc_debugfs_slow_ring_trc(phba,
3440 			"IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
3441 				*(((uint32_t *) irsp) + 4),
3442 				*(((uint32_t *) irsp) + 6),
3443 				*(((uint32_t *) irsp) + 7));
3444 		}
3445 
3446 		writel(pring->sli.sli3.rspidx,
3447 			&phba->host_gp[pring->ringno].rspGetInx);
3448 
3449 		spin_unlock_irqrestore(&phba->hbalock, iflag);
3450 		/* Handle the response IOCB */
3451 		rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3452 		spin_lock_irqsave(&phba->hbalock, iflag);
3453 
3454 		/*
3455 		 * If the port response put pointer has not been updated, sync
3456 		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3457 		 * response put pointer.
3458 		 */
3459 		if (pring->sli.sli3.rspidx == portRspPut) {
3460 			portRspPut = le32_to_cpu(pgp->rspPutInx);
3461 		}
3462 	} /* while (pring->sli.sli3.rspidx != portRspPut) */
3463 
3464 	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3465 		/* At least one response entry has been freed */
3466 		pring->stats.iocb_rsp_full++;
3467 		/* SET RxRE_RSP in Chip Att register */
3468 		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3469 		writel(status, phba->CAregaddr);
3470 		readl(phba->CAregaddr); /* flush */
3471 	}
3472 	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3473 		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3474 		pring->stats.iocb_cmd_empty++;
3475 
3476 		/* Force update of the local copy of cmdGetInx */
3477 		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3478 		lpfc_sli_resume_iocb(phba, pring);
3479 
3480 		if ((pring->lpfc_sli_cmd_available))
3481 			(pring->lpfc_sli_cmd_available) (phba, pring);
3482 
3483 	}
3484 
3485 	spin_unlock_irqrestore(&phba->hbalock, iflag);
3486 	return;
3487 }
3488 
3489 /**
3490  * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path ELS events
3491  * @phba: Pointer to HBA context object.
3492  * @pring: Pointer to driver SLI ring object.
3493  * @mask: Host attention register mask for this ring.
3494  *
3495  * This function is called from the worker thread when there is a pending
3496  * ELS response iocb on the driver internal slow-path response iocb worker
3497  * queue. The caller does not hold any lock. The function removes each
3498  * response iocb from the response worker queue and calls the handle
3499  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3500  **/
3501 static void
3502 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3503 				   struct lpfc_sli_ring *pring, uint32_t mask)
3504 {
3505 	struct lpfc_iocbq *irspiocbq;
3506 	struct hbq_dmabuf *dmabuf;
3507 	struct lpfc_cq_event *cq_event;
3508 	unsigned long iflag;
3509 
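	/*
	 * Clear the pending-event flag before draining sp_queue_event; any
	 * event queued after this point is expected to set the flag again
	 * and re-trigger this handler.
	 */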
3510 	spin_lock_irqsave(&phba->hbalock, iflag);
3511 	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3512 	spin_unlock_irqrestore(&phba->hbalock, iflag);
3513 	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
3514 		/* Get the response iocb from the head of work queue */
3515 		spin_lock_irqsave(&phba->hbalock, iflag);
3516 		list_remove_head(&phba->sli4_hba.sp_queue_event,
3517 				 cq_event, struct lpfc_cq_event, list);
3518 		spin_unlock_irqrestore(&phba->hbalock, iflag);
3519 
3520 		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3521 		case CQE_CODE_COMPL_WQE:
3522 			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3523 						 cq_event);
3524 			/* Translate ELS WCQE to response IOCBQ */
3525 			irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3526 								   irspiocbq);
3527 			if (irspiocbq)
3528 				lpfc_sli_sp_handle_rspiocb(phba, pring,
3529 							   irspiocbq);
3530 			break;
3531 		case CQE_CODE_RECEIVE:
3532 		case CQE_CODE_RECEIVE_V1:
3533 			dmabuf = container_of(cq_event, struct hbq_dmabuf,
3534 					      cq_event);
3535 			lpfc_sli4_handle_received_buffer(phba, dmabuf);
3536 			break;
3537 		default:
3538 			break;
3539 		}
3540 	}
3541 }
3542 
3543 /**
3544  * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
3545  * @phba: Pointer to HBA context object.
3546  * @pring: Pointer to driver SLI ring object.
3547  *
3548  * This function aborts all iocbs in the given ring and frees all the iocb
3549  * objects in txq. This function issues an abort iocb for all the iocb commands
3550  * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3551  * the return of this function. The caller is not required to hold any locks.
3552  **/
3553 void
3554 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3555 {
3556 	LIST_HEAD(completions);
3557 	struct lpfc_iocbq *iocb, *next_iocb;
3558 
3559 	if (pring->ringno == LPFC_ELS_RING) {
3560 		lpfc_fabric_abort_hba(phba);
3561 	}
3562 
3563 	/* Error everything on txq and txcmplq
3564 	 * First do the txq.
3565 	 */
3566 	if (phba->sli_rev >= LPFC_SLI_REV4) {
3567 		spin_lock_irq(&pring->ring_lock);
3568 		list_splice_init(&pring->txq, &completions);
3569 		pring->txq_cnt = 0;
3570 		spin_unlock_irq(&pring->ring_lock);
3571 
3572 		spin_lock_irq(&phba->hbalock);
3573 		/* Next issue ABTS for everything on the txcmplq */
3574 		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3575 			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3576 		spin_unlock_irq(&phba->hbalock);
3577 	} else {
3578 		spin_lock_irq(&phba->hbalock);
3579 		list_splice_init(&pring->txq, &completions);
3580 		pring->txq_cnt = 0;
3581 
3582 		/* Next issue ABTS for everything on the txcmplq */
3583 		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3584 			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3585 		spin_unlock_irq(&phba->hbalock);
3586 	}
3587 
3588 	/* Cancel all the IOCBs from the completions list */
3589 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3590 			      IOERR_SLI_ABORTED);
3591 }
3592 
3593 /**
3594  * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3595  * @phba: Pointer to HBA context object.
3597  *
3598  * This function aborts all iocbs in FCP rings and frees all the iocb
3599  * objects in txq. This function issues an abort iocb for all the iocb commands
3600  * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3601  * the return of this function. The caller is not required to hold any locks.
3602  **/
3603 void
3604 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3605 {
3606 	struct lpfc_sli *psli = &phba->sli;
3607 	struct lpfc_sli_ring  *pring;
3608 	uint32_t i;
3609 
3610 	/* Look on all the FCP Rings for the iotag */
3611 	if (phba->sli_rev >= LPFC_SLI_REV4) {
3612 		for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3613 			pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
3614 			lpfc_sli_abort_iocb_ring(phba, pring);
3615 		}
3616 	} else {
3617 		pring = &psli->ring[psli->fcp_ring];
3618 		lpfc_sli_abort_iocb_ring(phba, pring);
3619 	}
3620 }
3621 
3622 
3623 /**
3624  * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
3625  * @phba: Pointer to HBA context object.
3626  *
3627  * This function flushes all iocbs in the fcp ring and frees all the iocb
3628  * objects in txq and txcmplq. This function will not issue abort iocbs
3629  * for all the iocb commands in txcmplq, they will just be returned with
3630  * for all the iocb commands in txcmplq; they will just be returned with
3631  * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
3632  **/
3633 void
3634 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3635 {
3636 	LIST_HEAD(txq);
3637 	LIST_HEAD(txcmplq);
3638 	struct lpfc_sli *psli = &phba->sli;
3639 	struct lpfc_sli_ring  *pring;
3640 	uint32_t i;
3641 
3642 	spin_lock_irq(&phba->hbalock);
3643 	/* Indicate the I/O queues are flushed */
3644 	phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
3645 	spin_unlock_irq(&phba->hbalock);
3646 
3647 	/* Look on all the FCP Rings for the iotag */
3648 	if (phba->sli_rev >= LPFC_SLI_REV4) {
3649 		for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3650 			pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
3651 
3652 			spin_lock_irq(&pring->ring_lock);
3653 			/* Retrieve everything on txq */
3654 			list_splice_init(&pring->txq, &txq);
3655 			/* Retrieve everything on the txcmplq */
3656 			list_splice_init(&pring->txcmplq, &txcmplq);
3657 			pring->txq_cnt = 0;
3658 			pring->txcmplq_cnt = 0;
3659 			spin_unlock_irq(&pring->ring_lock);
3660 
3661 			/* Flush the txq */
3662 			lpfc_sli_cancel_iocbs(phba, &txq,
3663 					      IOSTAT_LOCAL_REJECT,
3664 					      IOERR_SLI_DOWN);
3665 			/* Flush the txcmpq */
3666 			lpfc_sli_cancel_iocbs(phba, &txcmplq,
3667 					      IOSTAT_LOCAL_REJECT,
3668 					      IOERR_SLI_DOWN);
3669 		}
3670 	} else {
3671 		pring = &psli->ring[psli->fcp_ring];
3672 
3673 		spin_lock_irq(&phba->hbalock);
3674 		/* Retrieve everything on txq */
3675 		list_splice_init(&pring->txq, &txq);
3676 		/* Retrieve everything on the txcmplq */
3677 		list_splice_init(&pring->txcmplq, &txcmplq);
3678 		pring->txq_cnt = 0;
3679 		pring->txcmplq_cnt = 0;
3680 		spin_unlock_irq(&phba->hbalock);
3681 
3682 		/* Flush the txq */
3683 		lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
3684 				      IOERR_SLI_DOWN);
3685 		/* Flush the txcmpq */
3686 		lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
3687 				      IOERR_SLI_DOWN);
3688 	}
3689 }
3690 
3691 /**
3692  * lpfc_sli_brdready_s3 - Check for sli3 host ready status
3693  * @phba: Pointer to HBA context object.
3694  * @mask: Bit mask to be checked.
3695  *
3696  * This function reads the host status register and compares
3697  * with the provided bit mask to check if HBA completed
3698  * with the provided bit mask to check if the HBA completed
3699  * the restart. This function will wait in a loop for the
3700  * HBA to complete the restart. If the HBA does not restart within
3701  * 15 iterations, the function will reset the HBA again. The
3702  * function returns 1 when the HBA fails to restart, otherwise it
3703  * returns zero.
3704 static int
3705 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
3706 {
3707 	uint32_t status;
3708 	int i = 0;
3709 	int retval = 0;
3710 
3711 	/* Read the HBA Host Status Register */
3712 	if (lpfc_readl(phba->HSregaddr, &status))
3713 		return 1;
3714 
3715 	/*
3716 	 * Check the status register every 10ms for 5 retries, then every
3717 	 * 500ms for 5, then every 2.5 sec for 5, then reset the board and
3718 	 * check every 2.5 sec for 4 more.
3719 	 * Break out of the loop if errors occurred during init.
3720 	 */
3721 	while (((status & mask) != mask) &&
3722 	       !(status & HS_FFERM) &&
3723 	       i++ < 20) {
3724 
3725 		if (i <= 5)
3726 			msleep(10);
3727 		else if (i <= 10)
3728 			msleep(500);
3729 		else
3730 			msleep(2500);
3731 
3732 		if (i == 15) {
3733 				/* Do post */
3734 			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3735 			lpfc_sli_brdrestart(phba);
3736 		}
3737 		/* Read the HBA Host Status Register */
3738 		if (lpfc_readl(phba->HSregaddr, &status)) {
3739 			retval = 1;
3740 			break;
3741 		}
3742 	}
3743 
3744 	/* Check to see if any errors occurred during init */
3745 	if ((status & HS_FFERM) || (i >= 20)) {
3746 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3747 				"2751 Adapter failed to restart, "
3748 				"status reg x%x, FW Data: A8 x%x AC x%x\n",
3749 				status,
3750 				readl(phba->MBslimaddr + 0xa8),
3751 				readl(phba->MBslimaddr + 0xac));
3752 		phba->link_state = LPFC_HBA_ERROR;
3753 		retval = 1;
3754 	}
3755 
3756 	return retval;
3757 }
3758 
3759 /**
3760  * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3761  * @phba: Pointer to HBA context object.
3762  * @mask: Bit mask to be checked.
3763  *
3764  * This function checks the host status register to see if the HBA is
3765  * ready. This function will wait in a loop for the HBA to be ready.
3766  * If the HBA is not ready, the function will reset the HBA PCI
3767  * function again. The function returns 1 when the HBA fails to be ready,
3768  * otherwise it returns zero.
3769  **/
3770 static int
3771 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3772 {
3773 	uint32_t status;
3774 	int retval = 0;
3775 
3776 	/* Read the HBA Host Status Register */
3777 	status = lpfc_sli4_post_status_check(phba);
3778 
3779 	if (status) {
3780 		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3781 		lpfc_sli_brdrestart(phba);
3782 		status = lpfc_sli4_post_status_check(phba);
3783 	}
3784 
3785 	/* Check to see if any errors occurred during init */
3786 	if (status) {
3787 		phba->link_state = LPFC_HBA_ERROR;
3788 		retval = 1;
3789 	} else
3790 		phba->sli4_hba.intr_enable = 0;
3791 
3792 	return retval;
3793 }
3794 
3795 /**
3796  * lpfc_sli_brdready - Wrapper func for checking the hba readiness
3797  * @phba: Pointer to HBA context object.
3798  * @mask: Bit mask to be checked.
3799  *
3800  * This routine wraps the actual SLI3 or SLI4 hba readiness check routine,
3801  * invoked through the API jump table function pointer in the lpfc_hba struct.
3802  **/
3803 int
3804 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3805 {
3806 	return phba->lpfc_sli_brdready(phba, mask);
3807 }
3808 
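
/*
 * Illustrative usage sketch (not taken from an existing caller): after a
 * board restart, a caller might poll for firmware and mailbox readiness
 * using the HS_FFRDY and HS_MBRDY bits, e.g.
 *
 *	if (lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY))
 *		return -EIO;
 */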
3809 #define BARRIER_TEST_PATTERN (0xdeadbeef)
3810 
3811 /**
3812  * lpfc_reset_barrier - Make HBA ready for HBA reset
3813  * @phba: Pointer to HBA context object.
3814  *
3815  * This function is called before resetting an HBA. This function is called
3816  * with hbalock held and requests HBA to quiesce DMAs before a reset.
3817  **/
3818 void lpfc_reset_barrier(struct lpfc_hba *phba)
3819 {
3820 	uint32_t __iomem *resp_buf;
3821 	uint32_t __iomem *mbox_buf;
3822 	volatile uint32_t mbox;
3823 	uint32_t hc_copy, ha_copy, resp_data;
3824 	int  i;
3825 	uint8_t hdrtype;
3826 
3827 	lockdep_assert_held(&phba->hbalock);
3828 
3829 	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
3830 	if (hdrtype != 0x80 ||
3831 	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
3832 	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
3833 		return;
3834 
3835 	/*
3836 	 * Tell the other part of the chip to suspend temporarily all
3837 	 * its DMA activity.
3838 	 */
3839 	resp_buf = phba->MBslimaddr;
3840 
3841 	/* Disable the error attention */
3842 	if (lpfc_readl(phba->HCregaddr, &hc_copy))
3843 		return;
3844 	writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
3845 	readl(phba->HCregaddr); /* flush */
3846 	phba->link_flag |= LS_IGNORE_ERATT;
3847 
3848 	if (lpfc_readl(phba->HAregaddr, &ha_copy))
3849 		return;
3850 	if (ha_copy & HA_ERATT) {
3851 		/* Clear Chip error bit */
3852 		writel(HA_ERATT, phba->HAregaddr);
3853 		phba->pport->stopped = 1;
3854 	}
3855 
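	/*
	 * A KILL_BOARD mailbox owned by the chip is written directly into
	 * SLIM, with a test pattern in the following SLIM word.  The loops
	 * below wait for the adapter to write back the one's complement of
	 * the pattern and then to hand mailbox ownership back to the host,
	 * which is taken as the indication that its DMA activity has
	 * quiesced.
	 */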
3856 	mbox = 0;
3857 	((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
3858 	((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
3859 
3860 	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
3861 	mbox_buf = phba->MBslimaddr;
3862 	writel(mbox, mbox_buf);
3863 
3864 	for (i = 0; i < 50; i++) {
3865 		if (lpfc_readl((resp_buf + 1), &resp_data))
3866 			return;
3867 		if (resp_data != ~(BARRIER_TEST_PATTERN))
3868 			mdelay(1);
3869 		else
3870 			break;
3871 	}
3872 	resp_data = 0;
3873 	if (lpfc_readl((resp_buf + 1), &resp_data))
3874 		return;
3875 	if (resp_data  != ~(BARRIER_TEST_PATTERN)) {
3876 		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
3877 		    phba->pport->stopped)
3878 			goto restore_hc;
3879 		else
3880 			goto clear_errat;
3881 	}
3882 
3883 	((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
3884 	resp_data = 0;
3885 	for (i = 0; i < 500; i++) {
3886 		if (lpfc_readl(resp_buf, &resp_data))
3887 			return;
3888 		if (resp_data != mbox)
3889 			mdelay(1);
3890 		else
3891 			break;
3892 	}
3893 
3894 clear_errat:
3895 
3896 	while (++i < 500) {
3897 		if (lpfc_readl(phba->HAregaddr, &ha_copy))
3898 			return;
3899 		if (!(ha_copy & HA_ERATT))
3900 			mdelay(1);
3901 		else
3902 			break;
3903 	}
3904 
3905 	if (readl(phba->HAregaddr) & HA_ERATT) {
3906 		writel(HA_ERATT, phba->HAregaddr);
3907 		phba->pport->stopped = 1;
3908 	}
3909 
3910 restore_hc:
3911 	phba->link_flag &= ~LS_IGNORE_ERATT;
3912 	writel(hc_copy, phba->HCregaddr);
3913 	readl(phba->HCregaddr); /* flush */
3914 }
3915 
3916 /**
3917  * lpfc_sli_brdkill - Issue a kill_board mailbox command
3918  * @phba: Pointer to HBA context object.
3919  *
3920  * This function issues a kill_board mailbox command and waits for
3921  * the error attention interrupt. This function is called for stopping
3922  * the firmware processing. The caller is not required to hold any
3923  * locks. This function calls lpfc_hba_down_post function to free
3924  * any pending commands after the kill. The function will return 1 when it
3925  * fails to kill the board, otherwise it will return 0.
3926  **/
3927 int
3928 lpfc_sli_brdkill(struct lpfc_hba *phba)
3929 {
3930 	struct lpfc_sli *psli;
3931 	LPFC_MBOXQ_t *pmb;
3932 	uint32_t status;
3933 	uint32_t ha_copy;
3934 	int retval;
3935 	int i = 0;
3936 
3937 	psli = &phba->sli;
3938 
3939 	/* Kill HBA */
3940 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3941 			"0329 Kill HBA Data: x%x x%x\n",
3942 			phba->pport->port_state, psli->sli_flag);
3943 
3944 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3945 	if (!pmb)
3946 		return 1;
3947 
3948 	/* Disable the error attention */
3949 	spin_lock_irq(&phba->hbalock);
3950 	if (lpfc_readl(phba->HCregaddr, &status)) {
3951 		spin_unlock_irq(&phba->hbalock);
3952 		mempool_free(pmb, phba->mbox_mem_pool);
3953 		return 1;
3954 	}
3955 	status &= ~HC_ERINT_ENA;
3956 	writel(status, phba->HCregaddr);
3957 	readl(phba->HCregaddr); /* flush */
3958 	phba->link_flag |= LS_IGNORE_ERATT;
3959 	spin_unlock_irq(&phba->hbalock);
3960 
3961 	lpfc_kill_board(phba, pmb);
3962 	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3963 	retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3964 
3965 	if (retval != MBX_SUCCESS) {
3966 		if (retval != MBX_BUSY)
3967 			mempool_free(pmb, phba->mbox_mem_pool);
3968 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3969 				"2752 KILL_BOARD command failed retval %d\n",
3970 				retval);
3971 		spin_lock_irq(&phba->hbalock);
3972 		phba->link_flag &= ~LS_IGNORE_ERATT;
3973 		spin_unlock_irq(&phba->hbalock);
3974 		return 1;
3975 	}
3976 
3977 	spin_lock_irq(&phba->hbalock);
3978 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3979 	spin_unlock_irq(&phba->hbalock);
3980 
3981 	mempool_free(pmb, phba->mbox_mem_pool);
3982 
3983 	/* There is no completion for a KILL_BOARD mbox cmd. Check for an error
3984 	 * attention every 100ms for 3 seconds. If we don't get ERATT after
3985 	 * 3 seconds we still set HBA_ERROR state because the status of the
3986 	 * board is now undefined.
3987 	 */
3988 	if (lpfc_readl(phba->HAregaddr, &ha_copy))
3989 		return 1;
3990 	while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
3991 		mdelay(100);
3992 		if (lpfc_readl(phba->HAregaddr, &ha_copy))
3993 			return 1;
3994 	}
3995 
3996 	del_timer_sync(&psli->mbox_tmo);
3997 	if (ha_copy & HA_ERATT) {
3998 		writel(HA_ERATT, phba->HAregaddr);
3999 		phba->pport->stopped = 1;
4000 	}
4001 	spin_lock_irq(&phba->hbalock);
4002 	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4003 	psli->mbox_active = NULL;
4004 	phba->link_flag &= ~LS_IGNORE_ERATT;
4005 	spin_unlock_irq(&phba->hbalock);
4006 
4007 	lpfc_hba_down_post(phba);
4008 	phba->link_state = LPFC_HBA_ERROR;
4009 
4010 	return ha_copy & HA_ERATT ? 0 : 1;
4011 }
4012 
4013 /**
4014  * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
4015  * @phba: Pointer to HBA context object.
4016  *
4017  * This function resets the HBA by writing HC_INITFF to the control
4018  * register. After the HBA resets, this function resets all the iocb ring
4019  * indices. This function disables PCI layer parity checking during
4020  * the reset.
4021  * This function returns 0 always.
4022  * The caller is not required to hold any locks.
4023  **/
4024 int
4025 lpfc_sli_brdreset(struct lpfc_hba *phba)
4026 {
4027 	struct lpfc_sli *psli;
4028 	struct lpfc_sli_ring *pring;
4029 	uint16_t cfg_value;
4030 	int i;
4031 
4032 	psli = &phba->sli;
4033 
4034 	/* Reset HBA */
4035 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4036 			"0325 Reset HBA Data: x%x x%x\n",
4037 			phba->pport->port_state, psli->sli_flag);
4038 
4039 	/* perform board reset */
4040 	phba->fc_eventTag = 0;
4041 	phba->link_events = 0;
4042 	phba->pport->fc_myDID = 0;
4043 	phba->pport->fc_prevDID = 0;
4044 
4045 	/* Turn off parity checking and serr during the physical reset */
4046 	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4047 	pci_write_config_word(phba->pcidev, PCI_COMMAND,
4048 			      (cfg_value &
4049 			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4050 
4051 	psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4052 
4053 	/* Now toggle INITFF bit in the Host Control Register */
4054 	writel(HC_INITFF, phba->HCregaddr);
4055 	mdelay(1);
4056 	readl(phba->HCregaddr); /* flush */
4057 	writel(0, phba->HCregaddr);
4058 	readl(phba->HCregaddr); /* flush */
4059 
4060 	/* Restore PCI cmd register */
4061 	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4062 
4063 	/* Initialize relevant SLI info */
4064 	for (i = 0; i < psli->num_rings; i++) {
4065 		pring = &psli->ring[i];
4066 		pring->flag = 0;
4067 		pring->sli.sli3.rspidx = 0;
4068 		pring->sli.sli3.next_cmdidx  = 0;
4069 		pring->sli.sli3.local_getidx = 0;
4070 		pring->sli.sli3.cmdidx = 0;
4071 		pring->missbufcnt = 0;
4072 	}
4073 
4074 	phba->link_state = LPFC_WARM_START;
4075 	return 0;
4076 }
4077 
4078 /**
4079  * lpfc_sli4_brdreset - Reset a sli-4 HBA
4080  * @phba: Pointer to HBA context object.
4081  *
4082  * This function resets a SLI4 HBA. This function disables PCI layer parity
4083  * checking during resets the device. The caller is not required to hold
4084  * checking while it resets the device. The caller is not required to hold
4085  *
4086  * This function returns 0 always.
4087  **/
4088 int
4089 lpfc_sli4_brdreset(struct lpfc_hba *phba)
4090 {
4091 	struct lpfc_sli *psli = &phba->sli;
4092 	uint16_t cfg_value;
4093 	int rc = 0;
4094 
4095 	/* Reset HBA */
4096 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4097 			"0295 Reset HBA Data: x%x x%x x%x\n",
4098 			phba->pport->port_state, psli->sli_flag,
4099 			phba->hba_flag);
4100 
4101 	/* perform board reset */
4102 	phba->fc_eventTag = 0;
4103 	phba->link_events = 0;
4104 	phba->pport->fc_myDID = 0;
4105 	phba->pport->fc_prevDID = 0;
4106 
4107 	spin_lock_irq(&phba->hbalock);
4108 	psli->sli_flag &= ~(LPFC_PROCESS_LA);
4109 	phba->fcf.fcf_flag = 0;
4110 	spin_unlock_irq(&phba->hbalock);
4111 
4112 	/* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4113 	if (phba->hba_flag & HBA_FW_DUMP_OP) {
4114 		phba->hba_flag &= ~HBA_FW_DUMP_OP;
4115 		return rc;
4116 	}
4117 
4118 	/* Now physically reset the device */
4119 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4120 			"0389 Performing PCI function reset!\n");
4121 
4122 	/* Turn off parity checking and serr during the physical reset */
4123 	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4124 	pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4125 			      ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4126 
4127 	/* Perform FCoE PCI function reset before freeing queue memory */
4128 	rc = lpfc_pci_function_reset(phba);
4129 	lpfc_sli4_queue_destroy(phba);
4130 
4131 	/* Restore PCI cmd register */
4132 	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4133 
4134 	return rc;
4135 }
4136 
4137 /**
4138  * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
4139  * @phba: Pointer to HBA context object.
4140  *
4141  * This function is called in the SLI initialization code path to
4142  * restart the HBA. The caller is not required to hold any lock.
4143  * This function writes MBX_RESTART mailbox command to the SLIM and
4144  * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4145  * function to free any pending commands. The function enables
4146  * POST only during the first initialization. The function returns zero.
4147  * The function does not guarantee completion of MBX_RESTART mailbox
4148  * command before the return of this function.
4149  **/
4150 static int
4151 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4152 {
4153 	MAILBOX_t *mb;
4154 	struct lpfc_sli *psli;
4155 	volatile uint32_t word0;
4156 	void __iomem *to_slim;
4157 	uint32_t hba_aer_enabled;
4158 
4159 	spin_lock_irq(&phba->hbalock);
4160 
4161 	/* Take PCIe device Advanced Error Reporting (AER) state */
4162 	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4163 
4164 	psli = &phba->sli;
4165 
4166 	/* Restart HBA */
4167 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4168 			"0337 Restart HBA Data: x%x x%x\n",
4169 			phba->pport->port_state, psli->sli_flag);
4170 
4171 	word0 = 0;
4172 	mb = (MAILBOX_t *) &word0;
4173 	mb->mbxCommand = MBX_RESTART;
4174 	mb->mbxHc = 1;
4175 
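	/* Quiesce adapter DMA (see lpfc_reset_barrier above) before writing
	 * the MBX_RESTART mailbox directly into SLIM.
	 */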
4176 	lpfc_reset_barrier(phba);
4177 
4178 	to_slim = phba->MBslimaddr;
4179 	writel(*(uint32_t *) mb, to_slim);
4180 	readl(to_slim); /* flush */
4181 
4182 	/* Only skip post after fc_ffinit is completed */
4183 	if (phba->pport->port_state)
4184 		word0 = 1;	/* This is really setting up word1 */
4185 	else
4186 		word0 = 0;	/* This is really setting up word1 */
4187 	to_slim = phba->MBslimaddr + sizeof (uint32_t);
4188 	writel(*(uint32_t *) mb, to_slim);
4189 	readl(to_slim); /* flush */
4190 
4191 	lpfc_sli_brdreset(phba);
4192 	phba->pport->stopped = 0;
4193 	phba->link_state = LPFC_INIT_START;
4194 	phba->hba_flag = 0;
4195 	spin_unlock_irq(&phba->hbalock);
4196 
4197 	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4198 	psli->stats_start = get_seconds();
4199 
4200 	/* Give the INITFF and Post time to settle. */
4201 	mdelay(100);
4202 
4203 	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
4204 	if (hba_aer_enabled)
4205 		pci_disable_pcie_error_reporting(phba->pcidev);
4206 
4207 	lpfc_hba_down_post(phba);
4208 
4209 	return 0;
4210 }
4211 
4212 /**
4213  * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4214  * @phba: Pointer to HBA context object.
4215  *
4216  * This function is called in the SLI initialization code path to restart
4217  * a SLI4 HBA. The caller is not required to hold any lock.
4218  * At the end of the function, it calls lpfc_hba_down_post function to
4219  * free any pending commands.
4220  **/
4221 static int
4222 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4223 {
4224 	struct lpfc_sli *psli = &phba->sli;
4225 	uint32_t hba_aer_enabled;
4226 	int rc;
4227 
4228 	/* Restart HBA */
4229 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4230 			"0296 Restart HBA Data: x%x x%x\n",
4231 			phba->pport->port_state, psli->sli_flag);
4232 
4233 	/* Take PCIe device Advanced Error Reporting (AER) state */
4234 	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4235 
4236 	rc = lpfc_sli4_brdreset(phba);
4237 
4238 	spin_lock_irq(&phba->hbalock);
4239 	phba->pport->stopped = 0;
4240 	phba->link_state = LPFC_INIT_START;
4241 	phba->hba_flag = 0;
4242 	spin_unlock_irq(&phba->hbalock);
4243 
4244 	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4245 	psli->stats_start = get_seconds();
4246 
4247 	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
4248 	if (hba_aer_enabled)
4249 		pci_disable_pcie_error_reporting(phba->pcidev);
4250 
4251 	lpfc_hba_down_post(phba);
4252 
4253 	return rc;
4254 }
4255 
4256 /**
4257  * lpfc_sli_brdrestart - Wrapper func for restarting hba
4258  * @phba: Pointer to HBA context object.
4259  *
4260  * This routine wraps the actual SLI3 or SLI4 hba restart routine, invoked
4261  * through the API jump table function pointer in the lpfc_hba struct.
4262  **/
4263 int
4264 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4265 {
4266 	return phba->lpfc_sli_brdrestart(phba);
4267 }
4268 
4269 /**
4270  * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
4271  * @phba: Pointer to HBA context object.
4272  *
4273  * This function is called after a HBA restart to wait for successful
4274  * restart of the HBA. Successful restart of the HBA is indicated by
4275  * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
4276  * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 150
4277  * iterations, the function will restart the HBA again. The function returns
4278  **/
4279 static int
4280 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4281 {
4282 	uint32_t status, i = 0;
4283 
4284 	/* Read the HBA Host Status Register */
4285 	if (lpfc_readl(phba->HSregaddr, &status))
4286 		return -EIO;
4287 
4288 	/* Check status register to see what current state is */
4289 	i = 0;
4290 	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4291 
4292 		/* Check every 10ms for 10 retries, then every 100ms for 90
4293 		 * retries, then every 1 sec for 50 retries, for a total of
4294 		 * ~60 seconds before resetting the board again and checking
4295 		 * every 1 sec for 50 more retries. The up to 60 second wait
4296 		 * for board ready is required for the Falcon FIPS zeroization
4297 		 * to complete; any board reset in between restarts the
4298 		 * zeroization and further delays board ready.
4299 		 */
4300 		if (i++ >= 200) {
4301 			/* Adapter failed to init, timeout, status reg
4302 			   <status> */
4303 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4304 					"0436 Adapter failed to init, "
4305 					"timeout, status reg x%x, "
4306 					"FW Data: A8 x%x AC x%x\n", status,
4307 					readl(phba->MBslimaddr + 0xa8),
4308 					readl(phba->MBslimaddr + 0xac));
4309 			phba->link_state = LPFC_HBA_ERROR;
4310 			return -ETIMEDOUT;
4311 		}
4312 
4313 		/* Check to see if any errors occurred during init */
4314 		if (status & HS_FFERM) {
4315 			/* ERROR: During chipset initialization */
4316 			/* Adapter failed to init, chipset, status reg
4317 			   <status> */
4318 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4319 					"0437 Adapter failed to init, "
4320 					"chipset, status reg x%x, "
4321 					"FW Data: A8 x%x AC x%x\n", status,
4322 					readl(phba->MBslimaddr + 0xa8),
4323 					readl(phba->MBslimaddr + 0xac));
4324 			phba->link_state = LPFC_HBA_ERROR;
4325 			return -EIO;
4326 		}
4327 
4328 		if (i <= 10)
4329 			msleep(10);
4330 		else if (i <= 100)
4331 			msleep(100);
4332 		else
4333 			msleep(1000);
4334 
4335 		if (i == 150) {
4336 			/* Do post */
4337 			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4338 			lpfc_sli_brdrestart(phba);
4339 		}
4340 		/* Read the HBA Host Status Register */
4341 		if (lpfc_readl(phba->HSregaddr, &status))
4342 			return -EIO;
4343 	}
4344 
4345 	/* Check to see if any errors occurred during init */
4346 	if (status & HS_FFERM) {
4347 		/* ERROR: During chipset initialization */
4348 		/* Adapter failed to init, chipset, status reg <status> */
4349 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4350 				"0438 Adapter failed to init, chipset, "
4351 				"status reg x%x, "
4352 				"FW Data: A8 x%x AC x%x\n", status,
4353 				readl(phba->MBslimaddr + 0xa8),
4354 				readl(phba->MBslimaddr + 0xac));
4355 		phba->link_state = LPFC_HBA_ERROR;
4356 		return -EIO;
4357 	}
4358 
4359 	/* Clear all interrupt enable conditions */
4360 	writel(0, phba->HCregaddr);
4361 	readl(phba->HCregaddr); /* flush */
4362 
4363 	/* setup host attn register */
4364 	writel(0xffffffff, phba->HAregaddr);
4365 	readl(phba->HAregaddr); /* flush */
4366 	return 0;
4367 }
4368 
4369 /**
4370  * lpfc_sli_hbq_count - Get the number of HBQs to be configured
4371  *
4372  * This function calculates and returns the number of HBQs required to be
4373  * configured.
4374  **/
4375 int
4376 lpfc_sli_hbq_count(void)
4377 {
4378 	return ARRAY_SIZE(lpfc_hbq_defs);
4379 }
4380 
4381 /**
4382  * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
4383  *
4384  * This function adds the number of hbq entries in every HBQ to get
4385  * the total number of hbq entries required for the HBA and returns
4386  * the total count.
4387  **/
4388 static int
4389 lpfc_sli_hbq_entry_count(void)
4390 {
4391 	int  hbq_count = lpfc_sli_hbq_count();
4392 	int  count = 0;
4393 	int  i;
4394 
4395 	for (i = 0; i < hbq_count; ++i)
4396 		count += lpfc_hbq_defs[i]->entry_count;
4397 	return count;
4398 }
4399 
4400 /**
4401  * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
4402  *
4403  * This function calculates amount of memory required for all hbq entries
4404  * to be configured and returns the total memory required.
4405  **/
4406 int
4407 lpfc_sli_hbq_size(void)
4408 {
4409 	return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4410 }
4411 
4412 /**
4413  * lpfc_sli_hbq_setup - configure and initialize HBQs
4414  * @phba: Pointer to HBA context object.
4415  *
4416  * This function is called during the SLI initialization to configure
4417  * all the HBQs and post buffers to the HBQ. The caller is not
4418  * required to hold any locks. This function will return zero if successful,
4419  * otherwise it will return a negative error code.
4420  **/
4421 static int
4422 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4423 {
4424 	int  hbq_count = lpfc_sli_hbq_count();
4425 	LPFC_MBOXQ_t *pmb;
4426 	MAILBOX_t *pmbox;
4427 	uint32_t hbqno;
4428 	uint32_t hbq_entry_index;
4429 
4430 	/* Get a Mailbox buffer to setup mailbox
4431 	 * commands for HBA initialization
4432 	 */
4433 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4434 
4435 	if (!pmb)
4436 		return -ENOMEM;
4437 
4438 	pmbox = &pmb->u.mb;
4439 
4440 	/* Initialize the struct lpfc_sli_hbq structure for each hbq */
4441 	phba->link_state = LPFC_INIT_MBX_CMDS;
4442 	phba->hbq_in_use = 1;
4443 
4444 	hbq_entry_index = 0;
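	/*
	 * hbq_entry_index accumulates the entry counts of the previously
	 * configured HBQs; it is passed to lpfc_config_hbq() as each HBQ's
	 * starting offset within the contiguous HBQ entry area.
	 */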
4445 	for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4446 		phba->hbqs[hbqno].next_hbqPutIdx = 0;
4447 		phba->hbqs[hbqno].hbqPutIdx      = 0;
4448 		phba->hbqs[hbqno].local_hbqGetIdx   = 0;
4449 		phba->hbqs[hbqno].entry_count =
4450 			lpfc_hbq_defs[hbqno]->entry_count;
4451 		lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4452 			hbq_entry_index, pmb);
4453 		hbq_entry_index += phba->hbqs[hbqno].entry_count;
4454 
4455 		if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4456 			/* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4457 			   mbxStatus <status>, ring <num> */
4458 
4459 			lpfc_printf_log(phba, KERN_ERR,
4460 					LOG_SLI | LOG_VPORT,
4461 					"1805 Adapter failed to init. "
4462 					"Data: x%x x%x x%x\n",
4463 					pmbox->mbxCommand,
4464 					pmbox->mbxStatus, hbqno);
4465 
4466 			phba->link_state = LPFC_HBA_ERROR;
4467 			mempool_free(pmb, phba->mbox_mem_pool);
4468 			return -ENXIO;
4469 		}
4470 	}
4471 	phba->hbq_count = hbq_count;
4472 
4473 	mempool_free(pmb, phba->mbox_mem_pool);
4474 
4475 	/* Initially populate or replenish the HBQs */
4476 	for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4477 		lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
4478 	return 0;
4479 }
4480 
4481 /**
4482  * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4483  * @phba: Pointer to HBA context object.
4484  *
4485  * This function is called during SLI4 initialization to configure the
4486  * single receive buffer HBQ (index 0) and post buffers to it. The caller is
4487  * not required to hold any locks. This function will return zero if
4488  * successful, otherwise it will return a negative error code.
4489  **/
4490 static int
4491 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4492 {
4493 	phba->hbq_in_use = 1;
4494 	phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
4495 	phba->hbq_count = 1;
4496 	/* Initially populate or replenish the HBQs */
4497 	lpfc_sli_hbqbuf_init_hbqs(phba, 0);
4498 	return 0;
4499 }
4500 
4501 /**
4502  * lpfc_sli_config_port - Issue config port mailbox command
4503  * @phba: Pointer to HBA context object.
4504  * @sli_mode: sli mode - 2/3
4505  *
4506  * This function is called by the SLI initialization code path
4507  * to issue the config_port mailbox command. This function restarts the
4508  * HBA firmware and issues a config_port mailbox command to configure
4509  * the SLI interface in the SLI mode specified by the sli_mode
4510  * variable. The caller is not required to hold any locks.
4511  * The function returns 0 if successful, else returns negative error
4512  * code.
4513  **/
4514 int
4515 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4516 {
4517 	LPFC_MBOXQ_t *pmb;
4518 	uint32_t resetcount = 0, rc = 0, done = 0;
4519 
4520 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4521 	if (!pmb) {
4522 		phba->link_state = LPFC_HBA_ERROR;
4523 		return -ENOMEM;
4524 	}
4525 
4526 	phba->sli_rev = sli_mode;
4527 	while (resetcount < 2 && !done) {
4528 		spin_lock_irq(&phba->hbalock);
4529 		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4530 		spin_unlock_irq(&phba->hbalock);
4531 		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4532 		lpfc_sli_brdrestart(phba);
4533 		rc = lpfc_sli_chipset_init(phba);
4534 		if (rc)
4535 			break;
4536 
4537 		spin_lock_irq(&phba->hbalock);
4538 		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4539 		spin_unlock_irq(&phba->hbalock);
4540 		resetcount++;
4541 
4542 		/* Call pre CONFIG_PORT mailbox command initialization.  A
4543 		 * value of 0 means the call was successful.  Any other
4544 		 * nonzero value is a failure, but if ERESTART is returned,
4545 		 * the driver may reset the HBA and try again.
4546 		 */
4547 		rc = lpfc_config_port_prep(phba);
4548 		if (rc == -ERESTART) {
4549 			phba->link_state = LPFC_LINK_UNKNOWN;
4550 			continue;
4551 		} else if (rc)
4552 			break;
4553 
4554 		phba->link_state = LPFC_INIT_MBX_CMDS;
4555 		lpfc_config_port(phba, pmb);
4556 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4557 		phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4558 					LPFC_SLI3_HBQ_ENABLED |
4559 					LPFC_SLI3_CRP_ENABLED |
4560 					LPFC_SLI3_BG_ENABLED |
4561 					LPFC_SLI3_DSS_ENABLED);
4562 		if (rc != MBX_SUCCESS) {
4563 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4564 				"0442 Adapter failed to init, mbxCmd x%x "
4565 				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
4566 				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
4567 			spin_lock_irq(&phba->hbalock);
4568 			phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
4569 			spin_unlock_irq(&phba->hbalock);
4570 			rc = -ENXIO;
4571 		} else {
4572 			/* Allow asynchronous mailbox command to go through */
4573 			spin_lock_irq(&phba->hbalock);
4574 			phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4575 			spin_unlock_irq(&phba->hbalock);
4576 			done = 1;
4577 
4578 			if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
4579 			    (pmb->u.mb.un.varCfgPort.gasabt == 0))
4580 				lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4581 					"3110 Port did not grant ASABT\n");
4582 		}
4583 	}
4584 	if (!done) {
4585 		rc = -EINVAL;
4586 		goto do_prep_failed;
4587 	}
4588 	if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
4589 		if (!pmb->u.mb.un.varCfgPort.cMA) {
4590 			rc = -ENXIO;
4591 			goto do_prep_failed;
4592 		}
4593 		if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
4594 			phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
4595 			phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
4596 			phba->max_vports = (phba->max_vpi > phba->max_vports) ?
4597 				phba->max_vpi : phba->max_vports;
4598 
4599 		} else
4600 			phba->max_vpi = 0;
4601 		phba->fips_level = 0;
4602 		phba->fips_spec_rev = 0;
4603 		if (pmb->u.mb.un.varCfgPort.gdss) {
4604 			phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
4605 			phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
4606 			phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
4607 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4608 					"2850 Security Crypto Active. FIPS x%d "
4609 					"(Spec Rev: x%d)",
4610 					phba->fips_level, phba->fips_spec_rev);
4611 		}
4612 		if (pmb->u.mb.un.varCfgPort.sec_err) {
4613 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4614 					"2856 Config Port Security Crypto "
4615 					"Error: x%x ",
4616 					pmb->u.mb.un.varCfgPort.sec_err);
4617 		}
4618 		if (pmb->u.mb.un.varCfgPort.gerbm)
4619 			phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
4620 		if (pmb->u.mb.un.varCfgPort.gcrp)
4621 			phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
4622 
4623 		phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
4624 		phba->port_gp = phba->mbox->us.s3_pgp.port;
4625 
4626 		if (phba->cfg_enable_bg) {
4627 			if (pmb->u.mb.un.varCfgPort.gbg)
4628 				phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
4629 			else
4630 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4631 						"0443 Adapter did not grant "
4632 						"BlockGuard\n");
4633 		}
4634 	} else {
4635 		phba->hbq_get = NULL;
4636 		phba->port_gp = phba->mbox->us.s2.port;
4637 		phba->max_vpi = 0;
4638 	}
4639 do_prep_failed:
4640 	mempool_free(pmb, phba->mbox_mem_pool);
4641 	return rc;
4642 }
4643 
4644 
4645 /**
4646  * lpfc_sli_hba_setup - SLI initialization function
4647  * @phba: Pointer to HBA context object.
4648  *
4649  * This function is the main SLI initialization function. This function
4650  * is called by the HBA initialization code, the HBA reset code and the HBA
4651  * error attention handler code. Caller is not required to hold any
4652  * locks. This function issues config_port mailbox command to configure
4653  * the SLI, setup iocb rings and HBQ rings. In the end the function
4654  * calls the config_port_post function to issue init_link mailbox
4655  * command and to start the discovery. The function will return zero
4656  * if successful, else it will return negative error code.
4657  **/
4658 int
4659 lpfc_sli_hba_setup(struct lpfc_hba *phba)
4660 {
4661 	uint32_t rc;
4662 	int  mode = 3, i;
4663 	int longs;
4664 
4665 	switch (phba->cfg_sli_mode) {
4666 	case 2:
4667 		if (phba->cfg_enable_npiv) {
4668 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4669 				"1824 NPIV enabled: Override sli_mode "
4670 				"parameter (%d) to auto (0).\n",
4671 				phba->cfg_sli_mode);
4672 			break;
4673 		}
4674 		mode = 2;
4675 		break;
4676 	case 0:
4677 	case 3:
4678 		break;
4679 	default:
4680 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4681 				"1819 Unrecognized sli_mode parameter: %d.\n",
4682 				phba->cfg_sli_mode);
4683 
4684 		break;
4685 	}
4686 	phba->fcp_embed_io = 0;	/* SLI4 FC support only */
4687 
4688 	rc = lpfc_sli_config_port(phba, mode);
4689 
4690 	if (rc && phba->cfg_sli_mode == 3)
4691 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4692 				"1820 Unable to select SLI-3.  "
4693 				"Not supported by adapter.\n");
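	/* Fall back to the other SLI mode if the first config_port attempt
	 * failed: try SLI-2 when SLI-3 was requested, or SLI-3 when SLI-2
	 * was requested.
	 */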
4694 	if (rc && mode != 2)
4695 		rc = lpfc_sli_config_port(phba, 2);
4696 	else if (rc && mode == 2)
4697 		rc = lpfc_sli_config_port(phba, 3);
4698 	if (rc)
4699 		goto lpfc_sli_hba_setup_error;
4700 
4701 	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
4702 	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
4703 		rc = pci_enable_pcie_error_reporting(phba->pcidev);
4704 		if (!rc) {
4705 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4706 					"2709 This device supports "
4707 					"Advanced Error Reporting (AER)\n");
4708 			spin_lock_irq(&phba->hbalock);
4709 			phba->hba_flag |= HBA_AER_ENABLED;
4710 			spin_unlock_irq(&phba->hbalock);
4711 		} else {
4712 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4713 					"2708 This device does not support "
4714 					"Advanced Error Reporting (AER): %d\n",
4715 					rc);
4716 			phba->cfg_aer_support = 0;
4717 		}
4718 	}
4719 
4720 	if (phba->sli_rev == 3) {
4721 		phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
4722 		phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
4723 	} else {
4724 		phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
4725 		phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
4726 		phba->sli3_options = 0;
4727 	}
4728 
4729 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4730 			"0444 Firmware in SLI %x mode. Max_vpi %d\n",
4731 			phba->sli_rev, phba->max_vpi);
4732 	rc = lpfc_sli_ring_map(phba);
4733 
4734 	if (rc)
4735 		goto lpfc_sli_hba_setup_error;
4736 
4737 	/* Initialize VPIs. */
4738 	if (phba->sli_rev == LPFC_SLI_REV3) {
4739 		/*
4740 		 * The VPI bitmask and physical ID array are allocated
4741 		 * and initialized once only - at driver load.  A port
4742 		 * reset doesn't need to reinitialize this memory.
4743 		 */
4744 		if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
4745 			longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
4746 			phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
4747 						  GFP_KERNEL);
4748 			if (!phba->vpi_bmask) {
4749 				rc = -ENOMEM;
4750 				goto lpfc_sli_hba_setup_error;
4751 			}
4752 
4753 			phba->vpi_ids = kzalloc(
4754 					(phba->max_vpi+1) * sizeof(uint16_t),
4755 					GFP_KERNEL);
4756 			if (!phba->vpi_ids) {
4757 				kfree(phba->vpi_bmask);
4758 				rc = -ENOMEM;
4759 				goto lpfc_sli_hba_setup_error;
4760 			}
4761 			for (i = 0; i < phba->max_vpi; i++)
4762 				phba->vpi_ids[i] = i;
4763 		}
4764 	}
4765 
4766 	/* Init HBQs */
4767 	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4768 		rc = lpfc_sli_hbq_setup(phba);
4769 		if (rc)
4770 			goto lpfc_sli_hba_setup_error;
4771 	}
4772 	spin_lock_irq(&phba->hbalock);
4773 	phba->sli.sli_flag |= LPFC_PROCESS_LA;
4774 	spin_unlock_irq(&phba->hbalock);
4775 
4776 	rc = lpfc_config_port_post(phba);
4777 	if (rc)
4778 		goto lpfc_sli_hba_setup_error;
4779 
4780 	return rc;
4781 
4782 lpfc_sli_hba_setup_error:
4783 	phba->link_state = LPFC_HBA_ERROR;
4784 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4785 			"0445 Firmware initialization failed\n");
4786 	return rc;
4787 }
4788 
4789 /**
4790  * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
4791  * @phba: Pointer to HBA context object.
4792  *
4793  * This function issues a dump mailbox command to read config region
4794  * 23, parses the records in the region, and populates the driver
4795  * data structures.
4796  **/
4797 static int
4798 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
4799 {
4800 	LPFC_MBOXQ_t *mboxq;
4801 	struct lpfc_dmabuf *mp;
4802 	struct lpfc_mqe *mqe;
4803 	uint32_t data_length;
4804 	int rc;
4805 
4806 	/* Program the default value of vlan_id and fc_map */
4807 	phba->valid_vlan = 0;
4808 	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4809 	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4810 	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4811 
4812 	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4813 	if (!mboxq)
4814 		return -ENOMEM;
4815 
4816 	mqe = &mboxq->u.mqe;
4817 	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
4818 		rc = -ENOMEM;
4819 		goto out_free_mboxq;
4820 	}
4821 
4822 	mp = (struct lpfc_dmabuf *) mboxq->context1;
4823 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4824 
4825 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4826 			"(%d):2571 Mailbox cmd x%x Status x%x "
4827 			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4828 			"x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4829 			"CQ: x%x x%x x%x x%x\n",
4830 			mboxq->vport ? mboxq->vport->vpi : 0,
4831 			bf_get(lpfc_mqe_command, mqe),
4832 			bf_get(lpfc_mqe_status, mqe),
4833 			mqe->un.mb_words[0], mqe->un.mb_words[1],
4834 			mqe->un.mb_words[2], mqe->un.mb_words[3],
4835 			mqe->un.mb_words[4], mqe->un.mb_words[5],
4836 			mqe->un.mb_words[6], mqe->un.mb_words[7],
4837 			mqe->un.mb_words[8], mqe->un.mb_words[9],
4838 			mqe->un.mb_words[10], mqe->un.mb_words[11],
4839 			mqe->un.mb_words[12], mqe->un.mb_words[13],
4840 			mqe->un.mb_words[14], mqe->un.mb_words[15],
4841 			mqe->un.mb_words[16], mqe->un.mb_words[50],
4842 			mboxq->mcqe.word0,
4843 			mboxq->mcqe.mcqe_tag0, 	mboxq->mcqe.mcqe_tag1,
4844 			mboxq->mcqe.trailer);
4845 
4846 	if (rc) {
4847 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
4848 		kfree(mp);
4849 		rc = -EIO;
4850 		goto out_free_mboxq;
4851 	}
4852 	data_length = mqe->un.mb_words[5];
4853 	if (data_length > DMP_RGN23_SIZE) {
4854 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
4855 		kfree(mp);
4856 		rc = -EIO;
4857 		goto out_free_mboxq;
4858 	}
4859 
4860 	lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4861 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
4862 	kfree(mp);
4863 	rc = 0;
4864 
4865 out_free_mboxq:
4866 	mempool_free(mboxq, phba->mbox_mem_pool);
4867 	return rc;
4868 }
4869 
4870 /**
4871  * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
4872  * @phba: pointer to lpfc hba data structure.
4873  * @mboxq: pointer to the LPFC_MBOXQ_t structure.
4874  * @vpd: pointer to the memory to hold resulting port vpd data.
4875  * @vpd_size: On input, the number of bytes allocated to @vpd.
4876  *	      On output, the number of data bytes in @vpd.
4877  *
4878  * This routine executes a READ_REV SLI4 mailbox command.  In
4879  * addition, this routine gets the port vpd data.
4880  *
4881  * Return codes
4882  * 	0 - successful
4883  * 	-ENOMEM - could not allocated memory.
4884  * 	-ENOMEM - could not allocate memory.
4885 static int
4886 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4887 		    uint8_t *vpd, uint32_t *vpd_size)
4888 {
4889 	int rc = 0;
4890 	uint32_t dma_size;
4891 	struct lpfc_dmabuf *dmabuf;
4892 	struct lpfc_mqe *mqe;
4893 
4894 	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4895 	if (!dmabuf)
4896 		return -ENOMEM;
4897 
4898 	/*
4899 	 * Get a DMA buffer for the vpd data resulting from the READ_REV
4900 	 * mailbox command.
4901 	 */
4902 	dma_size = *vpd_size;
4903 	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
4904 					   &dmabuf->phys, GFP_KERNEL);
4905 	if (!dmabuf->virt) {
4906 		kfree(dmabuf);
4907 		return -ENOMEM;
4908 	}
4909 
4910 	/*
4911 	 * The SLI4 implementation of READ_REV conflicts at word1,
4912 	 * bits 31:16 and SLI4 adds vpd functionality not present
4913 	 * in SLI3.  This code corrects the conflicts.
4914 	 */
4915 	lpfc_read_rev(phba, mboxq);
4916 	mqe = &mboxq->u.mqe;
4917 	mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
4918 	mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
4919 	mqe->un.read_rev.word1 &= 0x0000FFFF;
4920 	bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
4921 	bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
4922 
4923 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4924 	if (rc) {
4925 		dma_free_coherent(&phba->pcidev->dev, dma_size,
4926 				  dmabuf->virt, dmabuf->phys);
4927 		kfree(dmabuf);
4928 		return -EIO;
4929 	}
4930 
4931 	/*
4932 	 * The available vpd length cannot be bigger than the
4933 	 * DMA buffer passed to the port.  Catch the less than
4934 	 * case and update the caller's size.
4935 	 */
4936 	if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
4937 		*vpd_size = mqe->un.read_rev.avail_vpd_len;
4938 
4939 	memcpy(vpd, dmabuf->virt, *vpd_size);
4940 
4941 	dma_free_coherent(&phba->pcidev->dev, dma_size,
4942 			  dmabuf->virt, dmabuf->phys);
4943 	kfree(dmabuf);
4944 	return 0;
4945 }
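/*
 * Illustrative caller sketch (not part of the driver): a minimal view of how
 * lpfc_sli4_read_rev() is driven, modeled on lpfc_sli4_hba_setup() further
 * below.  The SLI4_PAGE_SIZE buffer size mirrors that caller; error handling
 * is reduced to the essentials.
 *
 *	uint32_t vpd_size = SLI4_PAGE_SIZE;
 *	uint8_t *vpd = kzalloc(vpd_size, GFP_KERNEL);
 *
 *	if (!vpd)
 *		return -ENOMEM;
 *	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
 *	if (unlikely(rc)) {
 *		kfree(vpd);
 *		return rc;
 *	}
 *	rc = lpfc_parse_vpd(phba, vpd, vpd_size);
 *	kfree(vpd);
 *
 * On return, vpd_size has been trimmed to the number of valid bytes the port
 * reported in avail_vpd_len.
 */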
4946 
4947 /**
4948  * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
4949  * @phba: pointer to lpfc hba data structure.
4950  *
4951  * This routine retrieves the SLI4 device physical port name that this PCI
4952  * function is attached to.
4953  *
4954  * Return codes
4955  *      0 - successful
4956  *      otherwise - failed to retrieve physical port name
4957  **/
4958 static int
4959 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
4960 {
4961 	LPFC_MBOXQ_t *mboxq;
4962 	struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
4963 	struct lpfc_controller_attribute *cntl_attr;
4964 	struct lpfc_mbx_get_port_name *get_port_name;
4965 	void *virtaddr = NULL;
4966 	uint32_t alloclen, reqlen;
4967 	uint32_t shdr_status, shdr_add_status;
4968 	union lpfc_sli4_cfg_shdr *shdr;
4969 	char cport_name = 0;
4970 	int rc;
4971 
4972 	/* We assume nothing at this point */
4973 	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
4974 	phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
4975 
4976 	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4977 	if (!mboxq)
4978 		return -ENOMEM;
4979 	/* obtain link type and link number via READ_CONFIG */
4980 	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
4981 	lpfc_sli4_read_config(phba);
4982 	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
4983 		goto retrieve_ppname;
4984 
4985 	/* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
4986 	reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
4987 	alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
4988 			LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
4989 			LPFC_SLI4_MBX_NEMBED);
4990 	if (alloclen < reqlen) {
4991 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4992 				"3084 Allocated DMA memory size (%d) is "
4993 				"less than the requested DMA memory size "
4994 				"(%d)\n", alloclen, reqlen);
4995 		rc = -ENOMEM;
4996 		goto out_free_mboxq;
4997 	}
4998 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4999 	virtaddr = mboxq->sge_array->addr[0];
5000 	mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5001 	shdr = &mbx_cntl_attr->cfg_shdr;
5002 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5003 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5004 	if (shdr_status || shdr_add_status || rc) {
5005 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5006 				"3085 Mailbox x%x (x%x/x%x) failed, "
5007 				"rc:x%x, status:x%x, add_status:x%x\n",
5008 				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5009 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5010 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5011 				rc, shdr_status, shdr_add_status);
5012 		rc = -ENXIO;
5013 		goto out_free_mboxq;
5014 	}
5015 	cntl_attr = &mbx_cntl_attr->cntl_attr;
5016 	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5017 	phba->sli4_hba.lnk_info.lnk_tp =
5018 		bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5019 	phba->sli4_hba.lnk_info.lnk_no =
5020 		bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5021 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5022 			"3086 lnk_type:%d, lnk_numb:%d\n",
5023 			phba->sli4_hba.lnk_info.lnk_tp,
5024 			phba->sli4_hba.lnk_info.lnk_no);
5025 
5026 retrieve_ppname:
5027 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5028 		LPFC_MBOX_OPCODE_GET_PORT_NAME,
5029 		sizeof(struct lpfc_mbx_get_port_name) -
5030 		sizeof(struct lpfc_sli4_cfg_mhdr),
5031 		LPFC_SLI4_MBX_EMBED);
5032 	get_port_name = &mboxq->u.mqe.un.get_port_name;
5033 	shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5034 	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5035 	bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5036 		phba->sli4_hba.lnk_info.lnk_tp);
5037 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5038 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5039 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5040 	if (shdr_status || shdr_add_status || rc) {
5041 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5042 				"3087 Mailbox x%x (x%x/x%x) failed: "
5043 				"rc:x%x, status:x%x, add_status:x%x\n",
5044 				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5045 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5046 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5047 				rc, shdr_status, shdr_add_status);
5048 		rc = -ENXIO;
5049 		goto out_free_mboxq;
5050 	}
5051 	switch (phba->sli4_hba.lnk_info.lnk_no) {
5052 	case LPFC_LINK_NUMBER_0:
5053 		cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5054 				&get_port_name->u.response);
5055 		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5056 		break;
5057 	case LPFC_LINK_NUMBER_1:
5058 		cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5059 				&get_port_name->u.response);
5060 		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5061 		break;
5062 	case LPFC_LINK_NUMBER_2:
5063 		cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5064 				&get_port_name->u.response);
5065 		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5066 		break;
5067 	case LPFC_LINK_NUMBER_3:
5068 		cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5069 				&get_port_name->u.response);
5070 		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5071 		break;
5072 	default:
5073 		break;
5074 	}
5075 
5076 	if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5077 		phba->Port[0] = cport_name;
5078 		phba->Port[1] = '\0';
5079 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5080 				"3091 SLI get port name: %s\n", phba->Port);
5081 	}
5082 
5083 out_free_mboxq:
5084 	if (rc != MBX_TIMEOUT) {
5085 		if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5086 			lpfc_sli4_mbox_cmd_free(phba, mboxq);
5087 		else
5088 			mempool_free(mboxq, phba->mbox_mem_pool);
5089 	}
5090 	return rc;
5091 }
5092 
5093 /**
5094  * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5095  * @phba: pointer to lpfc hba data structure.
5096  *
5097  * This routine is called to explicitly arm the SLI4 device's completion and
5098  * event queues.
5099  **/
5100 static void
5101 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5102 {
5103 	int fcp_eqidx;
5104 
5105 	lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
5106 	lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
5107 	fcp_eqidx = 0;
5108 	if (phba->sli4_hba.fcp_cq) {
5109 		do {
5110 			lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
5111 					     LPFC_QUEUE_REARM);
5112 		} while (++fcp_eqidx < phba->cfg_fcp_io_channel);
5113 	}
5114 
5115 	if (phba->cfg_fof)
5116 		lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
5117 
5118 	if (phba->sli4_hba.hba_eq) {
5119 		for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
5120 		     fcp_eqidx++)
5121 			lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
5122 					     LPFC_QUEUE_REARM);
5123 	}
5124 
5125 	if (phba->cfg_fof)
5126 		lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM);
5127 }
5128 
5129 /**
5130  * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5131  * @phba: Pointer to HBA context object.
5132  * @type: The resource extent type.
5133  * @extnt_count: buffer to hold port available extent count.
5134  * @extnt_size: buffer to hold element count per extent.
5135  *
5136  * This function calls the port and retrieves the number of available
5137  * extents and their size for a particular extent type.
5138  *
5139  * Returns: 0 if successful.  Nonzero otherwise.
5140  **/
5141 int
5142 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5143 			       uint16_t *extnt_count, uint16_t *extnt_size)
5144 {
5145 	int rc = 0;
5146 	uint32_t length;
5147 	uint32_t mbox_tmo;
5148 	struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5149 	LPFC_MBOXQ_t *mbox;
5150 
5151 	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5152 	if (!mbox)
5153 		return -ENOMEM;
5154 
5155 	/* Find out how many extents are available for this resource type */
5156 	length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5157 		  sizeof(struct lpfc_sli4_cfg_mhdr));
5158 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5159 			 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5160 			 length, LPFC_SLI4_MBX_EMBED);
5161 
5162 	/* Send an extents count of 0 - the GET doesn't use it. */
5163 	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5164 					LPFC_SLI4_MBX_EMBED);
5165 	if (unlikely(rc)) {
5166 		rc = -EIO;
5167 		goto err_exit;
5168 	}
5169 
5170 	if (!phba->sli4_hba.intr_enable)
5171 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5172 	else {
5173 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5174 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5175 	}
5176 	if (unlikely(rc)) {
5177 		rc = -EIO;
5178 		goto err_exit;
5179 	}
5180 
5181 	rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5182 	if (bf_get(lpfc_mbox_hdr_status,
5183 		   &rsrc_info->header.cfg_shdr.response)) {
5184 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5185 				"2930 Failed to get resource extents "
5186 				"Status 0x%x Add'l Status 0x%x\n",
5187 				bf_get(lpfc_mbox_hdr_status,
5188 				       &rsrc_info->header.cfg_shdr.response),
5189 				bf_get(lpfc_mbox_hdr_add_status,
5190 				       &rsrc_info->header.cfg_shdr.response));
5191 		rc = -EIO;
5192 		goto err_exit;
5193 	}
5194 
5195 	*extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5196 			      &rsrc_info->u.rsp);
5197 	*extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5198 			     &rsrc_info->u.rsp);
5199 
5200 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5201 			"3162 Retrieved extents type-%d from port: count:%d, "
5202 			"size:%d\n", type, *extnt_count, *extnt_size);
5203 
5204 err_exit:
5205 	mempool_free(mbox, phba->mbox_mem_pool);
5206 	return rc;
5207 }
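/*
 * Illustrative caller sketch (not part of the driver): querying what the port
 * can provide for one extent type before provisioning, modeled on the use in
 * lpfc_sli4_alloc_extent() further below:
 *
 *	uint16_t rsrc_cnt, rsrc_size;
 *
 *	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &rsrc_cnt, &rsrc_size);
 *	if (rc || !rsrc_cnt || !rsrc_size)
 *		return -ENOMEM;
 *
 * rsrc_cnt is the number of extents the port offers and rsrc_size is the
 * number of resource ids carried by each extent.
 */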
5208 
5209 /**
5210  * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5211  * @phba: Pointer to HBA context object.
5212  * @type: The extent type to check.
5213  *
5214  * This function reads the current available extents from the port and checks
5215  * if the extent count or extent size has changed since the last access.
5216  * Callers use this routine post port reset to understand if there is an
5217  * extent reprovisioning requirement.
5218  *
5219  * Returns:
5220  *   -Error: error indicates problem.
5221  *   1: Extent count or size has changed.
5222  *   0: No changes.
5223  **/
5224 static int
5225 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5226 {
5227 	uint16_t curr_ext_cnt, rsrc_ext_cnt;
5228 	uint16_t size_diff, rsrc_ext_size;
5229 	int rc = 0;
5230 	struct lpfc_rsrc_blks *rsrc_entry;
5231 	struct list_head *rsrc_blk_list = NULL;
5232 
5233 	size_diff = 0;
5234 	curr_ext_cnt = 0;
5235 	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5236 					    &rsrc_ext_cnt,
5237 					    &rsrc_ext_size);
5238 	if (unlikely(rc))
5239 		return -EIO;
5240 
5241 	switch (type) {
5242 	case LPFC_RSC_TYPE_FCOE_RPI:
5243 		rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5244 		break;
5245 	case LPFC_RSC_TYPE_FCOE_VPI:
5246 		rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5247 		break;
5248 	case LPFC_RSC_TYPE_FCOE_XRI:
5249 		rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5250 		break;
5251 	case LPFC_RSC_TYPE_FCOE_VFI:
5252 		rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5253 		break;
5254 	default:
5255 		break;
5256 	}
5257 
5258 	list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5259 		curr_ext_cnt++;
5260 		if (rsrc_entry->rsrc_size != rsrc_ext_size)
5261 			size_diff++;
5262 	}
5263 
5264 	if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5265 		rc = 1;
5266 
5267 	return rc;
5268 }
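/*
 * Illustrative sketch (not part of the driver): after a port reset this check
 * is run once per extent type and extents are reprovisioned only when any
 * type reports a change, mirroring lpfc_sli4_alloc_resource_identifiers()
 * further below:
 *
 *	int error = 0;
 *
 *	if (lpfc_sli4_chk_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VFI))
 *		error++;
 *	if (lpfc_sli4_chk_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI))
 *		error++;
 *	if (error)
 *		... deallocate and then reallocate all extent types ...
 *
 * A negative return (-EIO) is counted the same way as a reported change,
 * which forces a clean reprovisioning pass.
 */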
5269 
5270 /**
5271  * lpfc_sli4_cfg_post_extnts - Post an extent allocation request to the port.
5272  * @phba: Pointer to HBA context object.
5273  * @extnt_cnt: number of available extents.
5274  * @type: the extent type (rpi, xri, vfi, vpi).
5275  * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5276  * @mbox: pointer to the caller's allocated mailbox structure.
5277  *
5278  * This function executes the extents allocation request.  It also
5279  * takes care of the amount of memory needed to allocate or get the
5280  * allocated extents. It is the caller's responsibility to evaluate
5281  * the response.
5282  *
5283  * Returns:
5284  *   -Error:  Error value describes the condition found.
5285  *   0: if successful
5286  **/
5287 static int
5288 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5289 			  uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5290 {
5291 	int rc = 0;
5292 	uint32_t req_len;
5293 	uint32_t emb_len;
5294 	uint32_t alloc_len, mbox_tmo;
5295 
5296 	/* Calculate the total requested length of the dma memory */
5297 	req_len = extnt_cnt * sizeof(uint16_t);
5298 
5299 	/*
5300 	 * Calculate the size of an embedded mailbox.  The uint32_t
5301 	 * accounts for the extents-specific word.
5302 	 */
5303 	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5304 		sizeof(uint32_t);
5305 
5306 	/*
5307 	 * Presume the allocation and response will fit into an embedded
5308 	 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
5309 	 */
5310 	*emb = LPFC_SLI4_MBX_EMBED;
5311 	if (req_len > emb_len) {
5312 		req_len = extnt_cnt * sizeof(uint16_t) +
5313 			sizeof(union lpfc_sli4_cfg_shdr) +
5314 			sizeof(uint32_t);
5315 		*emb = LPFC_SLI4_MBX_NEMBED;
5316 	}
5317 
5318 	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5319 				     LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5320 				     req_len, *emb);
5321 	if (alloc_len < req_len) {
5322 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5323 			"2982 Allocated DMA memory size (x%x) is "
5324 			"less than the requested DMA memory "
5325 			"size (x%x)\n", alloc_len, req_len);
5326 		return -ENOMEM;
5327 	}
5328 	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5329 	if (unlikely(rc))
5330 		return -EIO;
5331 
5332 	if (!phba->sli4_hba.intr_enable)
5333 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5334 	else {
5335 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5336 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5337 	}
5338 
5339 	if (unlikely(rc))
5340 		rc = -EIO;
5341 	return rc;
5342 }
5343 
5344 /**
5345  * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5346  * @phba: Pointer to HBA context object.
5347  * @type:  The resource extent type to allocate.
5348  *
5349  * This function allocates the number of elements for the specified
5350  * resource type.
5351  **/
5352 static int
5353 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5354 {
5355 	bool emb = false;
5356 	uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5357 	uint16_t rsrc_id, rsrc_start, j, k;
5358 	uint16_t *ids;
5359 	int i, rc;
5360 	unsigned long longs;
5361 	unsigned long *bmask;
5362 	struct lpfc_rsrc_blks *rsrc_blks;
5363 	LPFC_MBOXQ_t *mbox;
5364 	uint32_t length;
5365 	struct lpfc_id_range *id_array = NULL;
5366 	void *virtaddr = NULL;
5367 	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5368 	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5369 	struct list_head *ext_blk_list;
5370 
5371 	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5372 					    &rsrc_cnt,
5373 					    &rsrc_size);
5374 	if (unlikely(rc))
5375 		return -EIO;
5376 
5377 	if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5378 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5379 			"3009 No available Resource Extents "
5380 			"for resource type 0x%x: Count: 0x%x, "
5381 			"Size 0x%x\n", type, rsrc_cnt,
5382 			rsrc_size);
5383 		return -ENOMEM;
5384 	}
5385 
5386 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5387 			"2903 Post resource extents type-0x%x: "
5388 			"count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5389 
5390 	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5391 	if (!mbox)
5392 		return -ENOMEM;
5393 
5394 	rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5395 	if (unlikely(rc)) {
5396 		rc = -EIO;
5397 		goto err_exit;
5398 	}
5399 
5400 	/*
5401 	 * Figure out where the response is located.  Then get local pointers
5402 	 * to the response data.  The port does not guarantee a response to
5403 	 * all extent count requests, so update the local variable with the
5404 	 * allocated count from the port.
5405 	 */
5406 	if (emb == LPFC_SLI4_MBX_EMBED) {
5407 		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5408 		id_array = &rsrc_ext->u.rsp.id[0];
5409 		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5410 	} else {
5411 		virtaddr = mbox->sge_array->addr[0];
5412 		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5413 		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5414 		id_array = &n_rsrc->id;
5415 	}
5416 
5417 	longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5418 	rsrc_id_cnt = rsrc_cnt * rsrc_size;
5419 
5420 	/*
5421 	 * Based on the resource size and count, correct the base and max
5422 	 * resource values.
5423 	 */
5424 	length = sizeof(struct lpfc_rsrc_blks);
5425 	switch (type) {
5426 	case LPFC_RSC_TYPE_FCOE_RPI:
5427 		phba->sli4_hba.rpi_bmask = kzalloc(longs *
5428 						   sizeof(unsigned long),
5429 						   GFP_KERNEL);
5430 		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5431 			rc = -ENOMEM;
5432 			goto err_exit;
5433 		}
5434 		phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
5435 						 sizeof(uint16_t),
5436 						 GFP_KERNEL);
5437 		if (unlikely(!phba->sli4_hba.rpi_ids)) {
5438 			kfree(phba->sli4_hba.rpi_bmask);
5439 			rc = -ENOMEM;
5440 			goto err_exit;
5441 		}
5442 
5443 		/*
5444 		 * The next_rpi was initialized with the maximum available
5445 		 * count but the port may allocate a smaller number.  Catch
5446 		 * that case and update the next_rpi.
5447 		 */
5448 		phba->sli4_hba.next_rpi = rsrc_id_cnt;
5449 
5450 		/* Initialize local ptrs for common extent processing later. */
5451 		bmask = phba->sli4_hba.rpi_bmask;
5452 		ids = phba->sli4_hba.rpi_ids;
5453 		ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5454 		break;
5455 	case LPFC_RSC_TYPE_FCOE_VPI:
5456 		phba->vpi_bmask = kzalloc(longs *
5457 					  sizeof(unsigned long),
5458 					  GFP_KERNEL);
5459 		if (unlikely(!phba->vpi_bmask)) {
5460 			rc = -ENOMEM;
5461 			goto err_exit;
5462 		}
5463 		phba->vpi_ids = kzalloc(rsrc_id_cnt *
5464 					 sizeof(uint16_t),
5465 					 GFP_KERNEL);
5466 		if (unlikely(!phba->vpi_ids)) {
5467 			kfree(phba->vpi_bmask);
5468 			rc = -ENOMEM;
5469 			goto err_exit;
5470 		}
5471 
5472 		/* Initialize local ptrs for common extent processing later. */
5473 		bmask = phba->vpi_bmask;
5474 		ids = phba->vpi_ids;
5475 		ext_blk_list = &phba->lpfc_vpi_blk_list;
5476 		break;
5477 	case LPFC_RSC_TYPE_FCOE_XRI:
5478 		phba->sli4_hba.xri_bmask = kzalloc(longs *
5479 						   sizeof(unsigned long),
5480 						   GFP_KERNEL);
5481 		if (unlikely(!phba->sli4_hba.xri_bmask)) {
5482 			rc = -ENOMEM;
5483 			goto err_exit;
5484 		}
5485 		phba->sli4_hba.max_cfg_param.xri_used = 0;
5486 		phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5487 						 sizeof(uint16_t),
5488 						 GFP_KERNEL);
5489 		if (unlikely(!phba->sli4_hba.xri_ids)) {
5490 			kfree(phba->sli4_hba.xri_bmask);
5491 			rc = -ENOMEM;
5492 			goto err_exit;
5493 		}
5494 
5495 		/* Initialize local ptrs for common extent processing later. */
5496 		bmask = phba->sli4_hba.xri_bmask;
5497 		ids = phba->sli4_hba.xri_ids;
5498 		ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5499 		break;
5500 	case LPFC_RSC_TYPE_FCOE_VFI:
5501 		phba->sli4_hba.vfi_bmask = kzalloc(longs *
5502 						   sizeof(unsigned long),
5503 						   GFP_KERNEL);
5504 		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5505 			rc = -ENOMEM;
5506 			goto err_exit;
5507 		}
5508 		phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
5509 						 sizeof(uint16_t),
5510 						 GFP_KERNEL);
5511 		if (unlikely(!phba->sli4_hba.vfi_ids)) {
5512 			kfree(phba->sli4_hba.vfi_bmask);
5513 			rc = -ENOMEM;
5514 			goto err_exit;
5515 		}
5516 
5517 		/* Initialize local ptrs for common extent processing later. */
5518 		bmask = phba->sli4_hba.vfi_bmask;
5519 		ids = phba->sli4_hba.vfi_ids;
5520 		ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5521 		break;
5522 	default:
5523 		/* Unsupported Opcode.  Fail call. */
5524 		id_array = NULL;
5525 		bmask = NULL;
5526 		ids = NULL;
5527 		ext_blk_list = NULL;
5528 		goto err_exit;
5529 	}
5530 
5531 	/*
5532 	 * Complete initializing the extent configuration with the
5533 	 * allocated ids assigned to this function.  The bitmask serves
5534 	 * as an index into the array and manages the available ids.  The
5535 	 * array just stores the ids communicated to the port via the wqes.
5536 	 */
5537 	for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5538 		if ((i % 2) == 0)
5539 			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5540 					 &id_array[k]);
5541 		else
5542 			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5543 					 &id_array[k]);
5544 
5545 		rsrc_blks = kzalloc(length, GFP_KERNEL);
5546 		if (unlikely(!rsrc_blks)) {
5547 			rc = -ENOMEM;
5548 			kfree(bmask);
5549 			kfree(ids);
5550 			goto err_exit;
5551 		}
5552 		rsrc_blks->rsrc_start = rsrc_id;
5553 		rsrc_blks->rsrc_size = rsrc_size;
5554 		list_add_tail(&rsrc_blks->list, ext_blk_list);
5555 		rsrc_start = rsrc_id;
5556 		if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
5557 			phba->sli4_hba.scsi_xri_start = rsrc_start +
5558 				lpfc_sli4_get_els_iocb_cnt(phba);
5559 
5560 		while (rsrc_id < (rsrc_start + rsrc_size)) {
5561 			ids[j] = rsrc_id;
5562 			rsrc_id++;
5563 			j++;
5564 		}
5565 		/* Entire word processed.  Get next word.*/
5566 		if ((i % 2) == 1)
5567 			k++;
5568 	}
5569  err_exit:
5570 	lpfc_sli4_mbox_cmd_free(phba, mbox);
5571 	return rc;
5572 }
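/*
 * Note on the decode loop above (illustrative, values hypothetical): each
 * 32-bit entry of id_array carries two 16-bit extent base identifiers, so
 * with rsrc_cnt = 4 and rsrc_size = 8 the walk proceeds as:
 *
 *	i = 0: base from word4_0 of id_array[0], fills ids[0..7]
 *	i = 1: base from word4_1 of id_array[0], fills ids[8..15],  k -> 1
 *	i = 2: base from word4_0 of id_array[1], fills ids[16..23]
 *	i = 3: base from word4_1 of id_array[1], fills ids[24..31], k -> 2
 *
 * The bitmask allocated per type indexes into ids[] and tracks which of the
 * rsrc_cnt * rsrc_size identifiers are currently in use.
 */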
5573 
5574 /**
5575  * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
5576  * @phba: Pointer to HBA context object.
5577  * @type: the extent's type.
5578  *
5579  * This function deallocates all extents of a particular resource type.
5580  * SLI4 does not allow for deallocating a particular extent range.  It
5581  * is the caller's responsibility to release all kernel memory resources.
5582  **/
5583 static int
5584 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5585 {
5586 	int rc;
5587 	uint32_t length, mbox_tmo = 0;
5588 	LPFC_MBOXQ_t *mbox;
5589 	struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
5590 	struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
5591 
5592 	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5593 	if (!mbox)
5594 		return -ENOMEM;
5595 
5596 	/*
5597 	 * This function sends an embedded mailbox because it only sends the
5598 	 * resource type.  All extents of this type are released by the
5599 	 * port.
5600 	 */
5601 	length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
5602 		  sizeof(struct lpfc_sli4_cfg_mhdr));
5603 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5604 			 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
5605 			 length, LPFC_SLI4_MBX_EMBED);
5606 
5607 	/* Send an extents count of 0 - the dealloc doesn't use it. */
5608 	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5609 					LPFC_SLI4_MBX_EMBED);
5610 	if (unlikely(rc)) {
5611 		rc = -EIO;
5612 		goto out_free_mbox;
5613 	}
5614 	if (!phba->sli4_hba.intr_enable)
5615 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5616 	else {
5617 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5618 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5619 	}
5620 	if (unlikely(rc)) {
5621 		rc = -EIO;
5622 		goto out_free_mbox;
5623 	}
5624 
5625 	dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
5626 	if (bf_get(lpfc_mbox_hdr_status,
5627 		   &dealloc_rsrc->header.cfg_shdr.response)) {
5628 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5629 				"2919 Failed to release resource extents "
5630 				"for type %d - Status 0x%x Add'l Status 0x%x. "
5631 				"Resource memory not released.\n",
5632 				type,
5633 				bf_get(lpfc_mbox_hdr_status,
5634 				    &dealloc_rsrc->header.cfg_shdr.response),
5635 				bf_get(lpfc_mbox_hdr_add_status,
5636 				    &dealloc_rsrc->header.cfg_shdr.response));
5637 		rc = -EIO;
5638 		goto out_free_mbox;
5639 	}
5640 
5641 	/* Release kernel memory resources for the specific type. */
5642 	switch (type) {
5643 	case LPFC_RSC_TYPE_FCOE_VPI:
5644 		kfree(phba->vpi_bmask);
5645 		kfree(phba->vpi_ids);
5646 		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5647 		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5648 				    &phba->lpfc_vpi_blk_list, list) {
5649 			list_del_init(&rsrc_blk->list);
5650 			kfree(rsrc_blk);
5651 		}
5652 		phba->sli4_hba.max_cfg_param.vpi_used = 0;
5653 		break;
5654 	case LPFC_RSC_TYPE_FCOE_XRI:
5655 		kfree(phba->sli4_hba.xri_bmask);
5656 		kfree(phba->sli4_hba.xri_ids);
5657 		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5658 				    &phba->sli4_hba.lpfc_xri_blk_list, list) {
5659 			list_del_init(&rsrc_blk->list);
5660 			kfree(rsrc_blk);
5661 		}
5662 		break;
5663 	case LPFC_RSC_TYPE_FCOE_VFI:
5664 		kfree(phba->sli4_hba.vfi_bmask);
5665 		kfree(phba->sli4_hba.vfi_ids);
5666 		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5667 		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5668 				    &phba->sli4_hba.lpfc_vfi_blk_list, list) {
5669 			list_del_init(&rsrc_blk->list);
5670 			kfree(rsrc_blk);
5671 		}
5672 		break;
5673 	case LPFC_RSC_TYPE_FCOE_RPI:
5674 		/* RPI bitmask and physical id array are cleaned up earlier. */
5675 		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5676 				    &phba->sli4_hba.lpfc_rpi_blk_list, list) {
5677 			list_del_init(&rsrc_blk->list);
5678 			kfree(rsrc_blk);
5679 		}
5680 		break;
5681 	default:
5682 		break;
5683 	}
5684 
5685 	bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5686 
5687  out_free_mbox:
5688 	mempool_free(mbox, phba->mbox_mem_pool);
5689 	return rc;
5690 }
5691 
5692 void
5693 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
5694 		  uint32_t feature)
5695 {
5696 	uint32_t len;
5697 
5698 	len = sizeof(struct lpfc_mbx_set_feature) -
5699 		sizeof(struct lpfc_sli4_cfg_mhdr);
5700 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5701 			 LPFC_MBOX_OPCODE_SET_FEATURES, len,
5702 			 LPFC_SLI4_MBX_EMBED);
5703 
5704 	switch (feature) {
5705 	case LPFC_SET_UE_RECOVERY:
5706 		bf_set(lpfc_mbx_set_feature_UER,
5707 		       &mbox->u.mqe.un.set_feature, 1);
5708 		mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
5709 		mbox->u.mqe.un.set_feature.param_len = 8;
5710 		break;
5711 	case LPFC_SET_MDS_DIAGS:
5712 		bf_set(lpfc_mbx_set_feature_mds,
5713 		       &mbox->u.mqe.un.set_feature, 1);
5714 		bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
5715 		       &mbox->u.mqe.un.set_feature, 0);
5716 		mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
5717 		mbox->u.mqe.un.set_feature.param_len = 8;
5718 		break;
5719 	}
5720 
5721 	return;
5722 }
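/*
 * Illustrative usage sketch (not part of the driver): lpfc_set_features()
 * only builds the SET_FEATURES mailbox command; the caller still issues it
 * and, on success, reads back any returned parameters, as the UE recovery
 * setup in lpfc_sli4_hba_setup() further below does:
 *
 *	lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *	if (rc == MBX_SUCCESS)
 *		phba->sli4_hba.ue_to_sr =
 *			bf_get(lpfc_mbx_set_feature_UESR,
 *			       &mboxq->u.mqe.un.set_feature);
 */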
5723 
5724 /**
5725  * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
5726  * @phba: Pointer to HBA context object.
5727  *
5728  * This function allocates all SLI4 resource identifiers.
5729  **/
5730 int
5731 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5732 {
5733 	int i, rc, error = 0;
5734 	uint16_t count, base;
5735 	unsigned long longs;
5736 
5737 	if (!phba->sli4_hba.rpi_hdrs_in_use)
5738 		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
5739 	if (phba->sli4_hba.extents_in_use) {
5740 		/*
5741 		 * The port supports resource extents. The XRI, VPI, VFI, RPI
5742 		 * resource extent count must be read and allocated before
5743 		 * provisioning the resource id arrays.
5744 		 */
5745 		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5746 		    LPFC_IDX_RSRC_RDY) {
5747 			/*
5748 			 * Extent-based resources are set - the driver could
5749 			 * be in a port reset. Figure out if any corrective
5750 			 * actions need to be taken.
5751 			 */
5752 			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5753 						 LPFC_RSC_TYPE_FCOE_VFI);
5754 			if (rc != 0)
5755 				error++;
5756 			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5757 						 LPFC_RSC_TYPE_FCOE_VPI);
5758 			if (rc != 0)
5759 				error++;
5760 			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5761 						 LPFC_RSC_TYPE_FCOE_XRI);
5762 			if (rc != 0)
5763 				error++;
5764 			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5765 						 LPFC_RSC_TYPE_FCOE_RPI);
5766 			if (rc != 0)
5767 				error++;
5768 
5769 			/*
5770 			 * It's possible that the number of resources
5771 			 * provided to this port instance changed between
5772 			 * resets.  Detect this condition and reallocate
5773 			 * resources.  Otherwise, there is no action.
5774 			 */
5775 			if (error) {
5776 				lpfc_printf_log(phba, KERN_INFO,
5777 						LOG_MBOX | LOG_INIT,
5778 						"2931 Detected extent resource "
5779 						"change.  Reallocating all "
5780 						"extents.\n");
5781 				rc = lpfc_sli4_dealloc_extent(phba,
5782 						 LPFC_RSC_TYPE_FCOE_VFI);
5783 				rc = lpfc_sli4_dealloc_extent(phba,
5784 						 LPFC_RSC_TYPE_FCOE_VPI);
5785 				rc = lpfc_sli4_dealloc_extent(phba,
5786 						 LPFC_RSC_TYPE_FCOE_XRI);
5787 				rc = lpfc_sli4_dealloc_extent(phba,
5788 						 LPFC_RSC_TYPE_FCOE_RPI);
5789 			} else
5790 				return 0;
5791 		}
5792 
5793 		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5794 		if (unlikely(rc))
5795 			goto err_exit;
5796 
5797 		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5798 		if (unlikely(rc))
5799 			goto err_exit;
5800 
5801 		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5802 		if (unlikely(rc))
5803 			goto err_exit;
5804 
5805 		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5806 		if (unlikely(rc))
5807 			goto err_exit;
5808 		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5809 		       LPFC_IDX_RSRC_RDY);
5810 		return rc;
5811 	} else {
5812 		/*
5813 		 * The port does not support resource extents.  The XRI, VPI,
5814 		 * VFI, RPI resource ids were determined from READ_CONFIG.
5815 		 * Just allocate the bitmasks and provision the resource id
5816 		 * arrays.  If a port reset is active, the resources don't
5817 		 * need any action - just exit.
5818 		 */
5819 		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5820 		    LPFC_IDX_RSRC_RDY) {
5821 			lpfc_sli4_dealloc_resource_identifiers(phba);
5822 			lpfc_sli4_remove_rpis(phba);
5823 		}
5824 		/* RPIs. */
5825 		count = phba->sli4_hba.max_cfg_param.max_rpi;
5826 		if (count <= 0) {
5827 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5828 					"3279 Invalid provisioning of "
5829 					"rpi:%d\n", count);
5830 			rc = -EINVAL;
5831 			goto err_exit;
5832 		}
5833 		base = phba->sli4_hba.max_cfg_param.rpi_base;
5834 		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5835 		phba->sli4_hba.rpi_bmask = kzalloc(longs *
5836 						   sizeof(unsigned long),
5837 						   GFP_KERNEL);
5838 		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5839 			rc = -ENOMEM;
5840 			goto err_exit;
5841 		}
5842 		phba->sli4_hba.rpi_ids = kzalloc(count *
5843 						 sizeof(uint16_t),
5844 						 GFP_KERNEL);
5845 		if (unlikely(!phba->sli4_hba.rpi_ids)) {
5846 			rc = -ENOMEM;
5847 			goto free_rpi_bmask;
5848 		}
5849 
5850 		for (i = 0; i < count; i++)
5851 			phba->sli4_hba.rpi_ids[i] = base + i;
5852 
5853 		/* VPIs. */
5854 		count = phba->sli4_hba.max_cfg_param.max_vpi;
5855 		if (count <= 0) {
5856 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5857 					"3280 Invalid provisioning of "
5858 					"vpi:%d\n", count);
5859 			rc = -EINVAL;
5860 			goto free_rpi_ids;
5861 		}
5862 		base = phba->sli4_hba.max_cfg_param.vpi_base;
5863 		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5864 		phba->vpi_bmask = kzalloc(longs *
5865 					  sizeof(unsigned long),
5866 					  GFP_KERNEL);
5867 		if (unlikely(!phba->vpi_bmask)) {
5868 			rc = -ENOMEM;
5869 			goto free_rpi_ids;
5870 		}
5871 		phba->vpi_ids = kzalloc(count *
5872 					sizeof(uint16_t),
5873 					GFP_KERNEL);
5874 		if (unlikely(!phba->vpi_ids)) {
5875 			rc = -ENOMEM;
5876 			goto free_vpi_bmask;
5877 		}
5878 
5879 		for (i = 0; i < count; i++)
5880 			phba->vpi_ids[i] = base + i;
5881 
5882 		/* XRIs. */
5883 		count = phba->sli4_hba.max_cfg_param.max_xri;
5884 		if (count <= 0) {
5885 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5886 					"3281 Invalid provisioning of "
5887 					"xri:%d\n", count);
5888 			rc = -EINVAL;
5889 			goto free_vpi_ids;
5890 		}
5891 		base = phba->sli4_hba.max_cfg_param.xri_base;
5892 		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5893 		phba->sli4_hba.xri_bmask = kzalloc(longs *
5894 						   sizeof(unsigned long),
5895 						   GFP_KERNEL);
5896 		if (unlikely(!phba->sli4_hba.xri_bmask)) {
5897 			rc = -ENOMEM;
5898 			goto free_vpi_ids;
5899 		}
5900 		phba->sli4_hba.max_cfg_param.xri_used = 0;
5901 		phba->sli4_hba.xri_ids = kzalloc(count *
5902 						 sizeof(uint16_t),
5903 						 GFP_KERNEL);
5904 		if (unlikely(!phba->sli4_hba.xri_ids)) {
5905 			rc = -ENOMEM;
5906 			goto free_xri_bmask;
5907 		}
5908 
5909 		for (i = 0; i < count; i++)
5910 			phba->sli4_hba.xri_ids[i] = base + i;
5911 
5912 		/* VFIs. */
5913 		count = phba->sli4_hba.max_cfg_param.max_vfi;
5914 		if (count <= 0) {
5915 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5916 					"3282 Invalid provisioning of "
5917 					"vfi:%d\n", count);
5918 			rc = -EINVAL;
5919 			goto free_xri_ids;
5920 		}
5921 		base = phba->sli4_hba.max_cfg_param.vfi_base;
5922 		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5923 		phba->sli4_hba.vfi_bmask = kzalloc(longs *
5924 						   sizeof(unsigned long),
5925 						   GFP_KERNEL);
5926 		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5927 			rc = -ENOMEM;
5928 			goto free_xri_ids;
5929 		}
5930 		phba->sli4_hba.vfi_ids = kzalloc(count *
5931 						 sizeof(uint16_t),
5932 						 GFP_KERNEL);
5933 		if (unlikely(!phba->sli4_hba.vfi_ids)) {
5934 			rc = -ENOMEM;
5935 			goto free_vfi_bmask;
5936 		}
5937 
5938 		for (i = 0; i < count; i++)
5939 			phba->sli4_hba.vfi_ids[i] = base + i;
5940 
5941 		/*
5942 		 * Mark all resources ready.  An HBA reset doesn't need
5943 		 * to reset the initialization.
5944 		 */
5945 		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5946 		       LPFC_IDX_RSRC_RDY);
5947 		return 0;
5948 	}
5949 
5950  free_vfi_bmask:
5951 	kfree(phba->sli4_hba.vfi_bmask);
5952  free_xri_ids:
5953 	kfree(phba->sli4_hba.xri_ids);
5954  free_xri_bmask:
5955 	kfree(phba->sli4_hba.xri_bmask);
5956  free_vpi_ids:
5957 	kfree(phba->vpi_ids);
5958  free_vpi_bmask:
5959 	kfree(phba->vpi_bmask);
5960  free_rpi_ids:
5961 	kfree(phba->sli4_hba.rpi_ids);
5962  free_rpi_bmask:
5963 	kfree(phba->sli4_hba.rpi_bmask);
5964  err_exit:
5965 	return rc;
5966 }
5967 
5968 /**
5969  * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
5970  * @phba: Pointer to HBA context object.
5971  *
5972  * This function releases all SLI4 resource identifiers (rpi, vpi, xri,
5973  * vfi) allocated to this PCI function.
5974  **/
5975 int
5976 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
5977 {
5978 	if (phba->sli4_hba.extents_in_use) {
5979 		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5980 		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5981 		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5982 		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5983 	} else {
5984 		kfree(phba->vpi_bmask);
5985 		phba->sli4_hba.max_cfg_param.vpi_used = 0;
5986 		kfree(phba->vpi_ids);
5987 		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5988 		kfree(phba->sli4_hba.xri_bmask);
5989 		kfree(phba->sli4_hba.xri_ids);
5990 		kfree(phba->sli4_hba.vfi_bmask);
5991 		kfree(phba->sli4_hba.vfi_ids);
5992 		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5993 		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5994 	}
5995 
5996 	return 0;
5997 }
5998 
5999 /**
6000  * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6001  * @phba: Pointer to HBA context object.
6002  * @type: The resource extent type.
6003  * @extnt_cnt: buffer to hold port extent count response
6004  * @extnt_size: buffer to hold port extent size response.
6005  *
6006  * This function calls the port to read the host allocated extents
6007  * for a particular type.
6008  **/
6009 int
6010 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6011 			       uint16_t *extnt_cnt, uint16_t *extnt_size)
6012 {
6013 	bool emb;
6014 	int rc = 0;
6015 	uint16_t curr_blks = 0;
6016 	uint32_t req_len, emb_len;
6017 	uint32_t alloc_len, mbox_tmo;
6018 	struct list_head *blk_list_head;
6019 	struct lpfc_rsrc_blks *rsrc_blk;
6020 	LPFC_MBOXQ_t *mbox;
6021 	void *virtaddr = NULL;
6022 	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6023 	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6024 	union  lpfc_sli4_cfg_shdr *shdr;
6025 
6026 	switch (type) {
6027 	case LPFC_RSC_TYPE_FCOE_VPI:
6028 		blk_list_head = &phba->lpfc_vpi_blk_list;
6029 		break;
6030 	case LPFC_RSC_TYPE_FCOE_XRI:
6031 		blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6032 		break;
6033 	case LPFC_RSC_TYPE_FCOE_VFI:
6034 		blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6035 		break;
6036 	case LPFC_RSC_TYPE_FCOE_RPI:
6037 		blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6038 		break;
6039 	default:
6040 		return -EIO;
6041 	}
6042 
6043 	/* Count the number of extents currently allocated for this type. */
6044 	list_for_each_entry(rsrc_blk, blk_list_head, list) {
6045 		if (curr_blks == 0) {
6046 			/*
6047 			 * The GET_ALLOCATED mailbox does not return the size,
6048 			 * just the count.  The size should be just the size
6049 			 * stored in the current allocated block and all sizes
6050 			 * for an extent type are the same so set the return
6051 			 * value now.
6052 			 */
6053 			*extnt_size = rsrc_blk->rsrc_size;
6054 		}
6055 		curr_blks++;
6056 	}
6057 
6058 	/*
6059 	 * Calculate the size of an embedded mailbox.  The uint32_t
6060 	 * accounts for the extents-specific word.
6061 	 */
6062 	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6063 		sizeof(uint32_t);
6064 
6065 	/*
6066 	 * Presume the allocation and response will fit into an embedded
6067 	 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
6068 	 */
6069 	emb = LPFC_SLI4_MBX_EMBED;
6070 	req_len = emb_len;
6071 	if (req_len > emb_len) {
6072 		req_len = curr_blks * sizeof(uint16_t) +
6073 			sizeof(union lpfc_sli4_cfg_shdr) +
6074 			sizeof(uint32_t);
6075 		emb = LPFC_SLI4_MBX_NEMBED;
6076 	}
6077 
6078 	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6079 	if (!mbox)
6080 		return -ENOMEM;
6081 	memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6082 
6083 	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6084 				     LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6085 				     req_len, emb);
6086 	if (alloc_len < req_len) {
6087 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6088 			"2983 Allocated DMA memory size (x%x) is "
6089 			"less than the requested DMA memory "
6090 			"size (x%x)\n", alloc_len, req_len);
6091 		rc = -ENOMEM;
6092 		goto err_exit;
6093 	}
6094 	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6095 	if (unlikely(rc)) {
6096 		rc = -EIO;
6097 		goto err_exit;
6098 	}
6099 
6100 	if (!phba->sli4_hba.intr_enable)
6101 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6102 	else {
6103 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6104 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6105 	}
6106 
6107 	if (unlikely(rc)) {
6108 		rc = -EIO;
6109 		goto err_exit;
6110 	}
6111 
6112 	/*
6113 	 * Figure out where the response is located.  Then get local pointers
6114 	 * to the response data.  The port does not guarantee a response to
6115 	 * all extent count requests, so update the local variable with the
6116 	 * allocated count from the port.
6117 	 */
6118 	if (emb == LPFC_SLI4_MBX_EMBED) {
6119 		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6120 		shdr = &rsrc_ext->header.cfg_shdr;
6121 		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6122 	} else {
6123 		virtaddr = mbox->sge_array->addr[0];
6124 		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6125 		shdr = &n_rsrc->cfg_shdr;
6126 		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6127 	}
6128 
6129 	if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6130 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6131 			"2984 Failed to read allocated resources "
6132 			"for type %d - Status 0x%x Add'l Status 0x%x.\n",
6133 			type,
6134 			bf_get(lpfc_mbox_hdr_status, &shdr->response),
6135 			bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6136 		rc = -EIO;
6137 		goto err_exit;
6138 	}
6139  err_exit:
6140 	lpfc_sli4_mbox_cmd_free(phba, mbox);
6141 	return rc;
6142 }
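/*
 * Illustrative caller sketch (hypothetical, not part of the driver): reading
 * back the XRI extents the port has already allocated to this function.  The
 * extent size is taken from the driver's block list, so the call is only
 * meaningful after lpfc_sli4_alloc_extent() has populated that list:
 *
 *	uint16_t xri_cnt, xri_size;
 *
 *	rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &xri_cnt, &xri_size);
 *	if (!rc)
 *		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 *				"xri extents: count %d size %d\n",
 *				xri_cnt, xri_size);
 */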
6143 
6144 /**
6145  * lpfc_sli4_repost_els_sgl_list - Repost the els buffer sgl pages as blocks
6146  * @phba: pointer to lpfc hba data structure.
6147  *
6148  * This routine walks the list of els buffers that have been allocated and
6149  * reposts them to the port using SGL block post. This is needed after a
6150  * pci_function_reset/warm_start or start. It attempts to construct blocks
6151  * of els buffer sgls that contain contiguous xris and uses the non-embedded
6152  * SGL block post mailbox commands to post them to the port. For any single
6153  * els buffer sgl with a non-contiguous xri, it uses the embedded SGL post
6154  * mailbox command for posting.
6155  *
6156  * Returns: 0 = success, non-zero failure.
6157  **/
6158 static int
6159 lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
6160 {
6161 	struct lpfc_sglq *sglq_entry = NULL;
6162 	struct lpfc_sglq *sglq_entry_next = NULL;
6163 	struct lpfc_sglq *sglq_entry_first = NULL;
6164 	int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0;
6165 	int last_xritag = NO_XRI;
6166 	struct lpfc_sli_ring *pring;
6167 	LIST_HEAD(prep_sgl_list);
6168 	LIST_HEAD(blck_sgl_list);
6169 	LIST_HEAD(allc_sgl_list);
6170 	LIST_HEAD(post_sgl_list);
6171 	LIST_HEAD(free_sgl_list);
6172 
6173 	pring = &phba->sli.ring[LPFC_ELS_RING];
6174 	spin_lock_irq(&phba->hbalock);
6175 	spin_lock(&pring->ring_lock);
6176 	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
6177 	spin_unlock(&pring->ring_lock);
6178 	spin_unlock_irq(&phba->hbalock);
6179 
6180 	total_cnt = phba->sli4_hba.els_xri_cnt;
6181 	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6182 				 &allc_sgl_list, list) {
6183 		list_del_init(&sglq_entry->list);
6184 		block_cnt++;
6185 		if ((last_xritag != NO_XRI) &&
6186 		    (sglq_entry->sli4_xritag != last_xritag + 1)) {
6187 			/* a hole in xri block, form a sgl posting block */
6188 			list_splice_init(&prep_sgl_list, &blck_sgl_list);
6189 			post_cnt = block_cnt - 1;
6190 			/* prepare list for next posting block */
6191 			list_add_tail(&sglq_entry->list, &prep_sgl_list);
6192 			block_cnt = 1;
6193 		} else {
6194 			/* prepare list for next posting block */
6195 			list_add_tail(&sglq_entry->list, &prep_sgl_list);
6196 			/* enough sgls for non-embed sgl mbox command */
6197 			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
6198 				list_splice_init(&prep_sgl_list,
6199 						 &blck_sgl_list);
6200 				post_cnt = block_cnt;
6201 				block_cnt = 0;
6202 			}
6203 		}
6204 		num_posted++;
6205 
6206 		/* keep track of last sgl's xritag */
6207 		last_xritag = sglq_entry->sli4_xritag;
6208 
6209 		/* end of repost sgl list condition for els buffers */
6210 		if (num_posted == phba->sli4_hba.els_xri_cnt) {
6211 			if (post_cnt == 0) {
6212 				list_splice_init(&prep_sgl_list,
6213 						 &blck_sgl_list);
6214 				post_cnt = block_cnt;
6215 			} else if (block_cnt == 1) {
6216 				status = lpfc_sli4_post_sgl(phba,
6217 						sglq_entry->phys, 0,
6218 						sglq_entry->sli4_xritag);
6219 				if (!status) {
6220 					/* successful, put sgl to posted list */
6221 					list_add_tail(&sglq_entry->list,
6222 						      &post_sgl_list);
6223 				} else {
6224 					/* Failure, put sgl to free list */
6225 					lpfc_printf_log(phba, KERN_WARNING,
6226 						LOG_SLI,
6227 						"3159 Failed to post els "
6228 						"sgl, xritag:x%x\n",
6229 						sglq_entry->sli4_xritag);
6230 					list_add_tail(&sglq_entry->list,
6231 						      &free_sgl_list);
6232 					total_cnt--;
6233 				}
6234 			}
6235 		}
6236 
6237 		/* continue until a nembed page worth of sgls */
6238 		if (post_cnt == 0)
6239 			continue;
6240 
6241 		/* post the els buffer list sgls as a block */
6242 		status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list,
6243 						     post_cnt);
6244 
6245 		if (!status) {
6246 			/* success, put sgl list to posted sgl list */
6247 			list_splice_init(&blck_sgl_list, &post_sgl_list);
6248 		} else {
6249 			/* Failure, put sgl list to free sgl list */
6250 			sglq_entry_first = list_first_entry(&blck_sgl_list,
6251 							    struct lpfc_sglq,
6252 							    list);
6253 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6254 					"3160 Failed to post els sgl-list, "
6255 					"xritag:x%x-x%x\n",
6256 					sglq_entry_first->sli4_xritag,
6257 					(sglq_entry_first->sli4_xritag +
6258 					 post_cnt - 1));
6259 			list_splice_init(&blck_sgl_list, &free_sgl_list);
6260 			total_cnt -= post_cnt;
6261 		}
6262 
6263 		/* don't reset xritag due to hole in xri block */
6264 		if (block_cnt == 0)
6265 			last_xritag = NO_XRI;
6266 
6267 		/* reset els sgl post count for next round of posting */
6268 		post_cnt = 0;
6269 	}
6270 	/* update the number of XRIs posted for ELS */
6271 	phba->sli4_hba.els_xri_cnt = total_cnt;
6272 
6273 	/* free the els sgls failed to post */
6274 	lpfc_free_sgl_list(phba, &free_sgl_list);
6275 
6276 	/* push els sgls posted to the available list */
6277 	if (!list_empty(&post_sgl_list)) {
6278 		spin_lock_irq(&phba->hbalock);
6279 		spin_lock(&pring->ring_lock);
6280 		list_splice_init(&post_sgl_list,
6281 				 &phba->sli4_hba.lpfc_sgl_list);
6282 		spin_unlock(&pring->ring_lock);
6283 		spin_unlock_irq(&phba->hbalock);
6284 	} else {
6285 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6286 				"3161 Failure to post els sgl to port.\n");
6287 		return -EIO;
6288 	}
6289 	return 0;
6290 }
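/*
 * Worked example for the blocking logic above (illustrative, xritag values
 * hypothetical): with els sgls carrying xritags 10, 11, 12, 20, 21 the loop
 * detects the hole after 12, posts {10, 11, 12} as one non-embedded SGL
 * block, restarts the prep list at 20, and posts {20, 21} when the
 * end-of-list condition (num_posted == els_xri_cnt) is reached.  Any sgl
 * that fails to post is moved to free_sgl_list and subtracted from
 * els_xri_cnt.
 */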
6291 
6292 /**
6293  * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
6294  * @phba: Pointer to HBA context object.
6295  *
6296  * This function is the main SLI4 device initialization PCI function. This
6297  * function is called by the HBA initialization code, HBA reset code and
6298  * HBA error attention handler code. Caller is not required to hold any
6299  * locks.
6300  **/
6301 int
6302 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6303 {
6304 	int rc;
6305 	LPFC_MBOXQ_t *mboxq;
6306 	struct lpfc_mqe *mqe;
6307 	uint8_t *vpd;
6308 	uint32_t vpd_size;
6309 	uint32_t ftr_rsp = 0;
6310 	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
6311 	struct lpfc_vport *vport = phba->pport;
6312 	struct lpfc_dmabuf *mp;
6313 
6314 	/* Perform a PCI function reset to start from clean */
6315 	rc = lpfc_pci_function_reset(phba);
6316 	if (unlikely(rc))
6317 		return -ENODEV;
6318 
6319 	/* Check the HBA Host Status Register for readiness */
6320 	rc = lpfc_sli4_post_status_check(phba);
6321 	if (unlikely(rc))
6322 		return -ENODEV;
6323 	else {
6324 		spin_lock_irq(&phba->hbalock);
6325 		phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
6326 		spin_unlock_irq(&phba->hbalock);
6327 	}
6328 
6329 	/*
6330 	 * Allocate a single mailbox container for initializing the
6331 	 * port.
6332 	 */
6333 	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6334 	if (!mboxq)
6335 		return -ENOMEM;
6336 
6337 	/* Issue READ_REV to collect vpd and FW information. */
6338 	vpd_size = SLI4_PAGE_SIZE;
6339 	vpd = kzalloc(vpd_size, GFP_KERNEL);
6340 	if (!vpd) {
6341 		rc = -ENOMEM;
6342 		goto out_free_mbox;
6343 	}
6344 
6345 	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
6346 	if (unlikely(rc)) {
6347 		kfree(vpd);
6348 		goto out_free_mbox;
6349 	}
6350 
6351 	mqe = &mboxq->u.mqe;
6352 	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
6353 	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
6354 		phba->hba_flag |= HBA_FCOE_MODE;
6355 		phba->fcp_embed_io = 0;	/* SLI4 FC support only */
6356 	} else {
6357 		phba->hba_flag &= ~HBA_FCOE_MODE;
6358 	}
6359 
6360 	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
6361 		LPFC_DCBX_CEE_MODE)
6362 		phba->hba_flag |= HBA_FIP_SUPPORT;
6363 	else
6364 		phba->hba_flag &= ~HBA_FIP_SUPPORT;
6365 
6366 	phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
6367 
6368 	if (phba->sli_rev != LPFC_SLI_REV4) {
6369 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6370 			"0376 READ_REV Error. SLI Level %d "
6371 			"FCoE enabled %d\n",
6372 			phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
6373 		rc = -EIO;
6374 		kfree(vpd);
6375 		goto out_free_mbox;
6376 	}
6377 
6378 	/*
6379 	 * Continue initialization with default values even if the driver failed
6380 	 * to read the FCoE param config regions; only read the parameters if the
6381 	 * board is FCoE.
6382 	 */
6383 	if (phba->hba_flag & HBA_FCOE_MODE &&
6384 	    lpfc_sli4_read_fcoe_params(phba))
6385 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
6386 			"2570 Failed to read FCoE parameters\n");
6387 
6388 	/*
6389 	 * Retrieve the sli4 device physical port name; failure to do so
6390 	 * is considered non-fatal.
6391 	 */
6392 	rc = lpfc_sli4_retrieve_pport_name(phba);
6393 	if (!rc)
6394 		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6395 				"3080 Successful retrieving SLI4 device "
6396 				"physical port name: %s.\n", phba->Port);
6397 
6398 	/*
6399 	 * Evaluate the read rev and vpd data. Populate the driver
6400 	 * state with the results. If this routine fails, the failure
6401 	 * is not fatal as the driver will use generic values.
6402 	 */
6403 	rc = lpfc_parse_vpd(phba, vpd, vpd_size);
6404 	if (unlikely(!rc)) {
6405 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6406 				"0377 Error %d parsing vpd. "
6407 				"Using defaults.\n", rc);
6408 		rc = 0;
6409 	}
6410 	kfree(vpd);
6411 
6412 	/* Save information as VPD data */
6413 	phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
6414 	phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
6415 	phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
6416 	phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
6417 					 &mqe->un.read_rev);
6418 	phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
6419 				       &mqe->un.read_rev);
6420 	phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
6421 					    &mqe->un.read_rev);
6422 	phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
6423 					   &mqe->un.read_rev);
6424 	phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
6425 	memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
6426 	phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
6427 	memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
6428 	phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
6429 	memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
6430 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6431 			"(%d):0380 READ_REV Status x%x "
6432 			"fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
6433 			mboxq->vport ? mboxq->vport->vpi : 0,
6434 			bf_get(lpfc_mqe_status, mqe),
6435 			phba->vpd.rev.opFwName,
6436 			phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
6437 			phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
6438 
6439 	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3)  */
6440 	rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
6441 	if (phba->pport->cfg_lun_queue_depth > rc) {
6442 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6443 				"3362 LUN queue depth changed from %d to %d\n",
6444 				phba->pport->cfg_lun_queue_depth, rc);
6445 		phba->pport->cfg_lun_queue_depth = rc;
6446 	}
6447 
6448 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
6449 	    LPFC_SLI_INTF_IF_TYPE_0) {
6450 		lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
6451 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6452 		if (rc == MBX_SUCCESS) {
6453 			phba->hba_flag |= HBA_RECOVERABLE_UE;
6454 			/* Set 1Sec interval to detect UE */
6455 			phba->eratt_poll_interval = 1;
6456 			phba->sli4_hba.ue_to_sr = bf_get(
6457 					lpfc_mbx_set_feature_UESR,
6458 					&mboxq->u.mqe.un.set_feature);
6459 			phba->sli4_hba.ue_to_rp = bf_get(
6460 					lpfc_mbx_set_feature_UERP,
6461 					&mboxq->u.mqe.un.set_feature);
6462 		}
6463 	}
6464 
6465 	if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
6466 		/* Enable MDS Diagnostics only if the SLI Port supports it */
6467 		lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
6468 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6469 		if (rc != MBX_SUCCESS)
6470 			phba->mds_diags_support = 0;
6471 	}
6472 
6473 	/*
6474 	 * Discover the port's supported feature set and match it against the
6475 	 * hosts requests.
6476 	 * host's requests.
6477 	lpfc_request_features(phba, mboxq);
6478 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6479 	if (unlikely(rc)) {
6480 		rc = -EIO;
6481 		goto out_free_mbox;
6482 	}
6483 
6484 	/*
6485 	 * The port must support FCP initiator mode as this is the
6486 	 * only mode running in the host.
6487 	 */
6488 	if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
6489 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6490 				"0378 No support for fcpi mode.\n");
6491 		ftr_rsp++;
6492 	}
6493 	if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
6494 		phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
6495 	else
6496 		phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
6497 	/*
6498 	 * If the port cannot support the host's requested features
6499 	 * then turn off the global config parameters to disable the
6500 	 * feature in the driver.  This is not a fatal error.
6501 	 */
6502 	phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
6503 	if (phba->cfg_enable_bg) {
6504 		if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
6505 			phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
6506 		else
6507 			ftr_rsp++;
6508 	}
6509 
6510 	if (phba->max_vpi && phba->cfg_enable_npiv &&
6511 	    !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6512 		ftr_rsp++;
6513 
6514 	if (ftr_rsp) {
6515 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6516 				"0379 Feature Mismatch Data: x%08x %08x "
6517 				"x%x x%x x%x\n", mqe->un.req_ftrs.word2,
6518 				mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
6519 				phba->cfg_enable_npiv, phba->max_vpi);
6520 		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
6521 			phba->cfg_enable_bg = 0;
6522 		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6523 			phba->cfg_enable_npiv = 0;
6524 	}
6525 
6526 	/* These SLI3 features are assumed in SLI4 */
6527 	spin_lock_irq(&phba->hbalock);
6528 	phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
6529 	spin_unlock_irq(&phba->hbalock);
6530 
6531 	/*
6532 	 * Allocate all resources (xri,rpi,vpi,vfi) now.  Subsequent
6533 	 * calls depend on these resources to complete port setup.
6534 	 */
6535 	rc = lpfc_sli4_alloc_resource_identifiers(phba);
6536 	if (rc) {
6537 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6538 				"2920 Failed to alloc Resource IDs "
6539 				"rc = x%x\n", rc);
6540 		goto out_free_mbox;
6541 	}
6542 
6543 	/* Read the port's service parameters. */
6544 	rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
6545 	if (rc) {
6546 		phba->link_state = LPFC_HBA_ERROR;
6547 		rc = -ENOMEM;
6548 		goto out_free_mbox;
6549 	}
6550 
6551 	mboxq->vport = vport;
6552 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6553 	mp = (struct lpfc_dmabuf *) mboxq->context1;
6554 	if (rc == MBX_SUCCESS) {
6555 		memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
6556 		rc = 0;
6557 	}
6558 
6559 	/*
6560 	 * This memory was allocated by the lpfc_read_sparam routine. Release
6561 	 * it to the mbuf pool.
6562 	 */
6563 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
6564 	kfree(mp);
6565 	mboxq->context1 = NULL;
6566 	if (unlikely(rc)) {
6567 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6568 				"0382 READ_SPARAM command failed "
6569 				"status %d, mbxStatus x%x\n",
6570 				rc, bf_get(lpfc_mqe_status, mqe));
6571 		phba->link_state = LPFC_HBA_ERROR;
6572 		rc = -EIO;
6573 		goto out_free_mbox;
6574 	}
6575 
6576 	lpfc_update_vport_wwn(vport);
6577 
6578 	/* Update the fc_host data structures with new wwn. */
6579 	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
6580 	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
6581 
6582 	/* update host els and scsi xri-sgl sizes and mappings */
6583 	rc = lpfc_sli4_xri_sgl_update(phba);
6584 	if (unlikely(rc)) {
6585 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6586 				"1400 Failed to update xri-sgl size and "
6587 				"mapping: %d\n", rc);
6588 		goto out_free_mbox;
6589 	}
6590 
6591 	/* register the els sgl pool to the port */
6592 	rc = lpfc_sli4_repost_els_sgl_list(phba);
6593 	if (unlikely(rc)) {
6594 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6595 				"0582 Error %d during els sgl post "
6596 				"operation\n", rc);
6597 		rc = -ENODEV;
6598 		goto out_free_mbox;
6599 	}
6600 
6601 	/* register the allocated scsi sgl pool to the port */
6602 	rc = lpfc_sli4_repost_scsi_sgl_list(phba);
6603 	if (unlikely(rc)) {
6604 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6605 				"0383 Error %d during scsi sgl post "
6606 				"operation\n", rc);
6607 		/* Some Scsi buffers were moved to the abort scsi list */
6608 		/* A pci function reset will repost them */
6609 		rc = -ENODEV;
6610 		goto out_free_mbox;
6611 	}
6612 
6613 	/* Post the rpi header region to the device. */
6614 	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
6615 	if (unlikely(rc)) {
6616 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6617 				"0393 Error %d during rpi post operation\n",
6618 				rc);
6619 		rc = -ENODEV;
6620 		goto out_free_mbox;
6621 	}
6622 	lpfc_sli4_node_prep(phba);
6623 
6624 	/* Create all the SLI4 queues */
6625 	rc = lpfc_sli4_queue_create(phba);
6626 	if (rc) {
6627 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6628 				"3089 Failed to allocate queues\n");
6629 		rc = -ENODEV;
6630 		goto out_stop_timers;
6631 	}
6632 	/* Set up all the queues to the device */
6633 	rc = lpfc_sli4_queue_setup(phba);
6634 	if (unlikely(rc)) {
6635 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6636 				"0381 Error %d during queue setup.\n", rc);
6637 		goto out_destroy_queue;
6638 	}
6639 
6640 	/* Arm the CQs and then EQs on device */
6641 	lpfc_sli4_arm_cqeq_intr(phba);
6642 
6643 	/* Indicate device interrupt mode */
6644 	phba->sli4_hba.intr_enable = 1;
6645 
6646 	/* Allow asynchronous mailbox command to go through */
6647 	spin_lock_irq(&phba->hbalock);
6648 	phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
6649 	spin_unlock_irq(&phba->hbalock);
6650 
6651 	/* Post receive buffers to the device */
6652 	lpfc_sli4_rb_setup(phba);
6653 
6654 	/* Reset HBA FCF states after HBA reset */
6655 	phba->fcf.fcf_flag = 0;
6656 	phba->fcf.current_rec.flag = 0;
6657 
6658 	/* Start the ELS watchdog timer */
6659 	mod_timer(&vport->els_tmofunc,
6660 		  jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
6661 
6662 	/* Start heart beat timer */
6663 	mod_timer(&phba->hb_tmofunc,
6664 		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
6665 	phba->hb_outstanding = 0;
6666 	phba->last_completion_time = jiffies;
6667 
6668 	/* Start error attention (ERATT) polling timer */
6669 	mod_timer(&phba->eratt_poll,
6670 		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
6671 
6672 	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
6673 	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
6674 		rc = pci_enable_pcie_error_reporting(phba->pcidev);
6675 		if (!rc) {
6676 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6677 					"2829 This device supports "
6678 					"Advanced Error Reporting (AER)\n");
6679 			spin_lock_irq(&phba->hbalock);
6680 			phba->hba_flag |= HBA_AER_ENABLED;
6681 			spin_unlock_irq(&phba->hbalock);
6682 		} else {
6683 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6684 					"2830 This device does not support "
6685 					"Advanced Error Reporting (AER)\n");
6686 			phba->cfg_aer_support = 0;
6687 		}
6688 		rc = 0;
6689 	}
6690 
6691 	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
6692 		/*
6693 		 * The FC Port needs to register FCFI (index 0)
6694 		 */
6695 		lpfc_reg_fcfi(phba, mboxq);
6696 		mboxq->vport = phba->pport;
6697 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6698 		if (rc != MBX_SUCCESS)
6699 			goto out_unset_queue;
6700 		rc = 0;
6701 		phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
6702 					&mboxq->u.mqe.un.reg_fcfi);
6703 
6704 		/* Check if the port is configured to be disabled */
6705 		lpfc_sli_read_link_ste(phba);
6706 	}
6707 
6708 	/*
6709 	 * The port is ready, set the host's link state to LINK_DOWN
6710 	 * in preparation for link interrupts.
6711 	 */
6712 	spin_lock_irq(&phba->hbalock);
6713 	phba->link_state = LPFC_LINK_DOWN;
6714 	spin_unlock_irq(&phba->hbalock);
6715 	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
6716 	    (phba->hba_flag & LINK_DISABLED)) {
6717 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6718 				"3103 Adapter Link is disabled.\n");
6719 		lpfc_down_link(phba, mboxq);
6720 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6721 		if (rc != MBX_SUCCESS) {
6722 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6723 					"3104 Adapter failed to issue "
6724 					"DOWN_LINK mbox cmd, rc:x%x\n", rc);
6725 			goto out_unset_queue;
6726 		}
6727 	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
6728 		/* don't perform init_link on SLI4 FC port loopback test */
6729 		if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
6730 			rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
6731 			if (rc)
6732 				goto out_unset_queue;
6733 		}
6734 	}
6735 	mempool_free(mboxq, phba->mbox_mem_pool);
6736 	return rc;
6737 out_unset_queue:
6738 	/* Unset all the queues set up in this routine when error out */
6739 	lpfc_sli4_queue_unset(phba);
6740 out_destroy_queue:
6741 	lpfc_sli4_queue_destroy(phba);
6742 out_stop_timers:
6743 	lpfc_stop_hba_timers(phba);
6744 out_free_mbox:
6745 	mempool_free(mboxq, phba->mbox_mem_pool);
6746 	return rc;
6747 }
6748 
6749 /**
6750  * lpfc_mbox_timeout - Timeout call back function for mbox timer
6751  * @ptr: context object - pointer to hba structure.
6752  *
6753  * This is the callback function for the mailbox timer. The mailbox
6754  * timer is armed when a new mailbox command is issued and the timer
6755  * is deleted when the mailbox completes. The function is called by
6756  * the kernel timer code when a mailbox does not complete within
6757  * the expected time. This function wakes up the worker thread to
6758  * process the mailbox timeout and returns. All the processing is
6759  * done by the worker thread function lpfc_mbox_timeout_handler.
6760  **/
6761 void
6762 lpfc_mbox_timeout(unsigned long ptr)
6763 {
6764 	struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
6765 	unsigned long iflag;
6766 	uint32_t tmo_posted;
6767 
6768 	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
6769 	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
6770 	if (!tmo_posted)
6771 		phba->pport->work_port_events |= WORKER_MBOX_TMO;
6772 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
6773 
6774 	if (!tmo_posted)
6775 		lpfc_worker_wake_up(phba);
6776 	return;
6777 }
6778 
6779 /**
6780  * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
6781  *                                    are pending
6782  * @phba: Pointer to HBA context object.
6783  *
6784  * This function checks if any mailbox completions are present on the mailbox
6785  * completion queue.
6786  **/
6787 static bool
6788 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
6789 {
6790 
6791 	uint32_t idx;
6792 	struct lpfc_queue *mcq;
6793 	struct lpfc_mcqe *mcqe;
6794 	bool pending_completions = false;
6795 
6796 	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
6797 		return false;
6798 
6799 	/* Check for completions on mailbox completion queue */
6800 
6801 	mcq = phba->sli4_hba.mbx_cq;
6802 	idx = mcq->hba_index;
6803 	while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) {
6804 		mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
6805 		if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
6806 		    (!bf_get_le32(lpfc_trailer_async, mcqe))) {
6807 			pending_completions = true;
6808 			break;
6809 		}
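		/* This entry is either an async event or not yet completed;
		 * advance to the next CQE and stop once we wrap back around
		 * to the starting hba_index.
		 */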
6810 		idx = (idx + 1) % mcq->entry_count;
6811 		if (mcq->hba_index == idx)
6812 			break;
6813 	}
6814 	return pending_completions;
6815 
6816 }
6817 
6818 /**
6819  * lpfc_sli4_process_missed_mbox_completions - process mbox completions
6820  *					      that were missed.
6821  * @phba: Pointer to HBA context object.
6822  *
6823  * For sli4, it is possible to miss an interrupt. As such, mbox completions
6824  * may be missed, causing erroneous mailbox timeouts to occur. This function
6825  * checks to see if mbox completions are on the mailbox completion queue
6826  * and will process all the completions associated with the eq for the
6827  * mailbox completion queue.
6828  **/
6829 bool
6830 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
6831 {
6832 
6833 	uint32_t eqidx;
6834 	struct lpfc_queue *fpeq = NULL;
6835 	struct lpfc_eqe *eqe;
6836 	bool mbox_pending;
6837 
6838 	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
6839 		return false;
6840 
6841 	/* Find the eq associated with the mcq */
6842 
6843 	if (phba->sli4_hba.hba_eq)
6844 		for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++)
6845 			if (phba->sli4_hba.hba_eq[eqidx]->queue_id ==
6846 			    phba->sli4_hba.mbx_cq->assoc_qid) {
6847 				fpeq = phba->sli4_hba.hba_eq[eqidx];
6848 				break;
6849 			}
6850 	if (!fpeq)
6851 		return false;
6852 
6853 	/* Turn off interrupts from this EQ */
6854 
6855 	lpfc_sli4_eq_clr_intr(fpeq);
6856 
6857 	/* Check to see if a mbox completion is pending */
6858 
6859 	mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
6860 
6861 	/*
6862 	 * If a mbox completion is pending, process all the events on EQ
6863 	 * associated with the mbox completion queue (this could include
6864 	 * mailbox commands, async events, els commands, receive queue data
6865 	 * and fcp commands)
6866 	 */
6867 
6868 	if (mbox_pending)
6869 		while ((eqe = lpfc_sli4_eq_get(fpeq))) {
6870 			lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
6871 			fpeq->EQ_processed++;
6872 		}
6873 
6874 	/* Always clear and re-arm the EQ */
6875 
6876 	lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
6877 
6878 	return mbox_pending;
6879 
6880 }
6881 
6882 /**
6883  * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
6884  * @phba: Pointer to HBA context object.
6885  *
6886  * This function is called from worker thread when a mailbox command times out.
6887  * The caller is not required to hold any locks. This function will reset the
6888  * HBA and recover all the pending commands.
6889  **/
6890 void
6891 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
6892 {
6893 	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
6894 	MAILBOX_t *mb = NULL;
6895 
6896 	struct lpfc_sli *psli = &phba->sli;
6897 
6898 	/* If the mailbox completed, process the completion and return */
6899 	if (lpfc_sli4_process_missed_mbox_completions(phba))
6900 		return;
6901 
6902 	if (pmbox != NULL)
6903 		mb = &pmbox->u.mb;
6904 	/* Check the pmbox pointer first.  There is a race condition
6905 	 * between the mbox timeout handler getting executed in the
6906 	 * worklist and the mailbox actually completing. When this
6907 	 * race condition occurs, the mbox_active will be NULL.
6908 	 */
6909 	spin_lock_irq(&phba->hbalock);
6910 	if (pmbox == NULL) {
6911 		lpfc_printf_log(phba, KERN_WARNING,
6912 				LOG_MBOX | LOG_SLI,
6913 				"0353 Active Mailbox cleared - mailbox timeout "
6914 				"exiting\n");
6915 		spin_unlock_irq(&phba->hbalock);
6916 		return;
6917 	}
6918 
6919 	/* Mbox cmd <mbxCommand> timeout */
6920 	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6921 			"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
6922 			mb->mbxCommand,
6923 			phba->pport->port_state,
6924 			phba->sli.sli_flag,
6925 			phba->sli.mbox_active);
6926 	spin_unlock_irq(&phba->hbalock);
6927 
6928 	/* Setting state unknown so lpfc_sli_abort_iocb_ring
6929 	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
6930 	 * it to fail all outstanding SCSI IO.
6931 	 */
6932 	spin_lock_irq(&phba->pport->work_port_lock);
6933 	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
6934 	spin_unlock_irq(&phba->pport->work_port_lock);
6935 	spin_lock_irq(&phba->hbalock);
6936 	phba->link_state = LPFC_LINK_UNKNOWN;
6937 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
6938 	spin_unlock_irq(&phba->hbalock);
6939 
6940 	lpfc_sli_abort_fcp_rings(phba);
6941 
6942 	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6943 			"0345 Resetting board due to mailbox timeout\n");
6944 
6945 	/* Reset the HBA device */
6946 	lpfc_reset_hba(phba);
6947 }
6948 
6949 /**
6950  * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
6951  * @phba: Pointer to HBA context object.
6952  * @pmbox: Pointer to mailbox object.
6953  * @flag: Flag indicating how the mailbox needs to be processed.
6954  *
6955  * This function is called by discovery code and HBA management code
6956  * to submit a mailbox command to firmware with SLI-3 interface spec. This
6957  * function gets the hbalock to protect the data structures.
6958  * The mailbox command can be submitted in polling mode, in which case
6959  * this function will wait in a polling loop for the completion of the
6960  * mailbox.
6961  * If the mailbox is submitted in no_wait mode (not polling) the
6962  * function will submit the command and returns immediately without waiting
6963  * for the mailbox completion. The no_wait is supported only when HBA
6964  * is in SLI2/SLI3 mode - interrupts are enabled.
6965  * The SLI interface allows only one mailbox pending at a time. If the
6966  * mailbox is issued in polling mode and there is already a mailbox
6967  * pending, then the function will return an error. If the mailbox is issued
6968  * in NO_WAIT mode and there is a mailbox pending already, the function
6969  * will return MBX_BUSY after queuing the mailbox into mailbox queue.
6970  * The sli layer owns the mailbox object until the completion of the mailbox
6971  * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
6972  * return codes, the caller owns the mailbox command after the return of
6973  * the function.
6974  **/
6975 static int
6976 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
6977 		       uint32_t flag)
6978 {
6979 	MAILBOX_t *mbx;
6980 	struct lpfc_sli *psli = &phba->sli;
6981 	uint32_t status, evtctr;
6982 	uint32_t ha_copy, hc_copy;
6983 	int i;
6984 	unsigned long timeout;
6985 	unsigned long drvr_flag = 0;
6986 	uint32_t word0, ldata;
6987 	void __iomem *to_slim;
6988 	int processing_queue = 0;
6989 
6990 	spin_lock_irqsave(&phba->hbalock, drvr_flag);
6991 	if (!pmbox) {
6992 		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6993 		/* processing mbox queue from intr_handler */
6994 		if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
6995 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6996 			return MBX_SUCCESS;
6997 		}
6998 		processing_queue = 1;
6999 		pmbox = lpfc_mbox_get(phba);
7000 		if (!pmbox) {
7001 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7002 			return MBX_SUCCESS;
7003 		}
7004 	}
7005 
7006 	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
7007 		pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
7008 		if(!pmbox->vport) {
7009 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7010 			lpfc_printf_log(phba, KERN_ERR,
7011 					LOG_MBOX | LOG_VPORT,
7012 					"1806 Mbox x%x failed. No vport\n",
7013 					pmbox->u.mb.mbxCommand);
7014 			dump_stack();
7015 			goto out_not_finished;
7016 		}
7017 	}
7018 
7019 	/* If the PCI channel is in offline state, do not post mbox. */
7020 	if (unlikely(pci_channel_offline(phba->pcidev))) {
7021 		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7022 		goto out_not_finished;
7023 	}
7024 
7025 	/* If HBA has a deferred error attention, fail the iocb. */
7026 	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
7027 		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7028 		goto out_not_finished;
7029 	}
7030 
7031 	psli = &phba->sli;
7032 
7033 	mbx = &pmbox->u.mb;
7034 	status = MBX_SUCCESS;
7035 
7036 	if (phba->link_state == LPFC_HBA_ERROR) {
7037 		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7038 
7039 		/* Mbox command <mbxCommand> cannot issue */
7040 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7041 				"(%d):0311 Mailbox command x%x cannot "
7042 				"issue Data: x%x x%x\n",
7043 				pmbox->vport ? pmbox->vport->vpi : 0,
7044 				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
7045 		goto out_not_finished;
7046 	}
7047 
7048 	if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
7049 		if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
7050 			!(hc_copy & HC_MBINT_ENA)) {
7051 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7052 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7053 				"(%d):2528 Mailbox command x%x cannot "
7054 				"issue Data: x%x x%x\n",
7055 				pmbox->vport ? pmbox->vport->vpi : 0,
7056 				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
7057 			goto out_not_finished;
7058 		}
7059 	}
7060 
7061 	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7062 		/* Polling for a mbox command when another one is already active
7063 		 * is not allowed in SLI. Also, the driver must have established
7064 		 * SLI2 mode to queue and process multiple mbox commands.
7065 		 */
7066 
7067 		if (flag & MBX_POLL) {
7068 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7069 
7070 			/* Mbox command <mbxCommand> cannot issue */
7071 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7072 					"(%d):2529 Mailbox command x%x "
7073 					"cannot issue Data: x%x x%x\n",
7074 					pmbox->vport ? pmbox->vport->vpi : 0,
7075 					pmbox->u.mb.mbxCommand,
7076 					psli->sli_flag, flag);
7077 			goto out_not_finished;
7078 		}
7079 
7080 		if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
7081 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7082 			/* Mbox command <mbxCommand> cannot issue */
7083 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7084 					"(%d):2530 Mailbox command x%x "
7085 					"cannot issue Data: x%x x%x\n",
7086 					pmbox->vport ? pmbox->vport->vpi : 0,
7087 					pmbox->u.mb.mbxCommand,
7088 					psli->sli_flag, flag);
7089 			goto out_not_finished;
7090 		}
7091 
7092 		/* Another mailbox command is still being processed, queue this
7093 		 * command to be processed later.
7094 		 */
7095 		lpfc_mbox_put(phba, pmbox);
7096 
7097 		/* Mbox cmd issue - BUSY */
7098 		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7099 				"(%d):0308 Mbox cmd issue - BUSY Data: "
7100 				"x%x x%x x%x x%x\n",
7101 				pmbox->vport ? pmbox->vport->vpi : 0xffffff,
7102 				mbx->mbxCommand, phba->pport->port_state,
7103 				psli->sli_flag, flag);
7104 
7105 		psli->slistat.mbox_busy++;
7106 		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7107 
7108 		if (pmbox->vport) {
7109 			lpfc_debugfs_disc_trc(pmbox->vport,
7110 				LPFC_DISC_TRC_MBOX_VPORT,
7111 				"MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
7112 				(uint32_t)mbx->mbxCommand,
7113 				mbx->un.varWords[0], mbx->un.varWords[1]);
7114 		}
7115 		else {
7116 			lpfc_debugfs_disc_trc(phba->pport,
7117 				LPFC_DISC_TRC_MBOX,
7118 				"MBOX Bsy:        cmd:x%x mb:x%x x%x",
7119 				(uint32_t)mbx->mbxCommand,
7120 				mbx->un.varWords[0], mbx->un.varWords[1]);
7121 		}
7122 
7123 		return MBX_BUSY;
7124 	}
7125 
7126 	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7127 
7128 	/* If we are not polling, we MUST be in SLI2 mode */
7129 	if (flag != MBX_POLL) {
7130 		if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
7131 		    (mbx->mbxCommand != MBX_KILL_BOARD)) {
7132 			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7133 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7134 			/* Mbox command <mbxCommand> cannot issue */
7135 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7136 					"(%d):2531 Mailbox command x%x "
7137 					"cannot issue Data: x%x x%x\n",
7138 					pmbox->vport ? pmbox->vport->vpi : 0,
7139 					pmbox->u.mb.mbxCommand,
7140 					psli->sli_flag, flag);
7141 			goto out_not_finished;
7142 		}
7143 		/* timeout active mbox command */
7144 		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
7145 					   1000);
7146 		mod_timer(&psli->mbox_tmo, jiffies + timeout);
7147 	}
7148 
7149 	/* Mailbox cmd <cmd> issue */
7150 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7151 			"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
7152 			"x%x\n",
7153 			pmbox->vport ? pmbox->vport->vpi : 0,
7154 			mbx->mbxCommand, phba->pport->port_state,
7155 			psli->sli_flag, flag);
7156 
7157 	if (mbx->mbxCommand != MBX_HEARTBEAT) {
7158 		if (pmbox->vport) {
7159 			lpfc_debugfs_disc_trc(pmbox->vport,
7160 				LPFC_DISC_TRC_MBOX_VPORT,
7161 				"MBOX Send vport: cmd:x%x mb:x%x x%x",
7162 				(uint32_t)mbx->mbxCommand,
7163 				mbx->un.varWords[0], mbx->un.varWords[1]);
7164 		}
7165 		else {
7166 			lpfc_debugfs_disc_trc(phba->pport,
7167 				LPFC_DISC_TRC_MBOX,
7168 				"MBOX Send:       cmd:x%x mb:x%x x%x",
7169 				(uint32_t)mbx->mbxCommand,
7170 				mbx->un.varWords[0], mbx->un.varWords[1]);
7171 		}
7172 	}
7173 
7174 	psli->slistat.mbox_cmd++;
7175 	evtctr = psli->slistat.mbox_event;
7176 
7177 	/* next set own bit for the adapter and copy over command word */
7178 	mbx->mbxOwner = OWN_CHIP;
7179 
7180 	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7181 		/* Populate mbox extension offset word. */
7182 		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
7183 			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7184 				= (uint8_t *)phba->mbox_ext
7185 				  - (uint8_t *)phba->mbox;
7186 		}
7187 
7188 		/* Copy the mailbox extension data */
7189 		if (pmbox->in_ext_byte_len && pmbox->context2) {
7190 			lpfc_sli_pcimem_bcopy(pmbox->context2,
7191 				(uint8_t *)phba->mbox_ext,
7192 				pmbox->in_ext_byte_len);
7193 		}
7194 		/* Copy command data to host SLIM area */
7195 		lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
7196 	} else {
7197 		/* Populate mbox extension offset word. */
7198 		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
7199 			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7200 				= MAILBOX_HBA_EXT_OFFSET;
7201 
7202 		/* Copy the mailbox extension data */
7203 		if (pmbox->in_ext_byte_len && pmbox->context2) {
7204 			lpfc_memcpy_to_slim(phba->MBslimaddr +
7205 				MAILBOX_HBA_EXT_OFFSET,
7206 				pmbox->context2, pmbox->in_ext_byte_len);
7207 
7208 		}
7209 		if (mbx->mbxCommand == MBX_CONFIG_PORT) {
7210 			/* copy command data into host mbox for cmpl */
7211 			lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
7212 		}
7213 
7214 		/* First copy mbox command data to HBA SLIM, skip past first
7215 		   word */
7216 		to_slim = phba->MBslimaddr + sizeof (uint32_t);
7217 		lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
7218 			    MAILBOX_CMD_SIZE - sizeof (uint32_t));
7219 
7220 		/* Next copy over first word, with mbxOwner set */
7221 		ldata = *((uint32_t *)mbx);
7222 		to_slim = phba->MBslimaddr;
7223 		writel(ldata, to_slim);
7224 		readl(to_slim); /* flush */
7225 
7226 		if (mbx->mbxCommand == MBX_CONFIG_PORT) {
7227 			/* switch over to host mailbox */
7228 			psli->sli_flag |= LPFC_SLI_ACTIVE;
7229 		}
7230 	}
7231 
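	/* Ensure the mailbox/SLIM updates above are visible to the HBA
	 * before the mailbox attention (CA_MBATT) write below.
	 */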
7232 	wmb();
7233 
7234 	switch (flag) {
7235 	case MBX_NOWAIT:
7236 		/* Set up reference to mailbox command */
7237 		psli->mbox_active = pmbox;
7238 		/* Interrupt board to do it */
7239 		writel(CA_MBATT, phba->CAregaddr);
7240 		readl(phba->CAregaddr); /* flush */
7241 		/* Don't wait for it to finish, just return */
7242 		break;
7243 
7244 	case MBX_POLL:
7245 		/* Set up null reference to mailbox command */
7246 		psli->mbox_active = NULL;
7247 		/* Interrupt board to do it */
7248 		writel(CA_MBATT, phba->CAregaddr);
7249 		readl(phba->CAregaddr); /* flush */
7250 
7251 		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7252 			/* First read mbox status word */
7253 			word0 = *((uint32_t *)phba->mbox);
7254 			word0 = le32_to_cpu(word0);
7255 		} else {
7256 			/* First read mbox status word */
7257 			if (lpfc_readl(phba->MBslimaddr, &word0)) {
7258 				spin_unlock_irqrestore(&phba->hbalock,
7259 						       drvr_flag);
7260 				goto out_not_finished;
7261 			}
7262 		}
7263 
7264 		/* Read the HBA Host Attention Register */
7265 		if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7266 			spin_unlock_irqrestore(&phba->hbalock,
7267 						       drvr_flag);
7268 			goto out_not_finished;
7269 		}
7270 		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
7271 							1000) + jiffies;
7272 		i = 0;
7273 		/* Wait for command to complete */
7274 		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
7275 		       (!(ha_copy & HA_MBATT) &&
7276 			(phba->link_state > LPFC_WARM_START))) {
7277 			if (time_after(jiffies, timeout)) {
7278 				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7279 				spin_unlock_irqrestore(&phba->hbalock,
7280 						       drvr_flag);
7281 				goto out_not_finished;
7282 			}
7283 
7284 			/* Check if we took a mbox interrupt while we were
7285 			   polling */
7286 			if (((word0 & OWN_CHIP) != OWN_CHIP)
7287 			    && (evtctr != psli->slistat.mbox_event))
7288 				break;
7289 
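			/* After roughly ten fast polls, back off: drop the
			 * hbalock and sleep 1 ms between status checks.
			 */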
7290 			if (i++ > 10) {
7291 				spin_unlock_irqrestore(&phba->hbalock,
7292 						       drvr_flag);
7293 				msleep(1);
7294 				spin_lock_irqsave(&phba->hbalock, drvr_flag);
7295 			}
7296 
7297 			if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7298 				/* First copy command data */
7299 				word0 = *((uint32_t *)phba->mbox);
7300 				word0 = le32_to_cpu(word0);
7301 				if (mbx->mbxCommand == MBX_CONFIG_PORT) {
7302 					MAILBOX_t *slimmb;
7303 					uint32_t slimword0;
7304 					/* Check real SLIM for any errors */
7305 					slimword0 = readl(phba->MBslimaddr);
7306 					slimmb = (MAILBOX_t *) & slimword0;
7307 					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
7308 					    && slimmb->mbxStatus) {
7309 						psli->sli_flag &=
7310 						    ~LPFC_SLI_ACTIVE;
7311 						word0 = slimword0;
7312 					}
7313 				}
7314 			} else {
7315 				/* First copy command data */
7316 				word0 = readl(phba->MBslimaddr);
7317 			}
7318 			/* Read the HBA Host Attention Register */
7319 			if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7320 				spin_unlock_irqrestore(&phba->hbalock,
7321 						       drvr_flag);
7322 				goto out_not_finished;
7323 			}
7324 		}
7325 
7326 		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7327 			/* copy results back to user */
7328 			lpfc_sli_pcimem_bcopy(phba->mbox, mbx, MAILBOX_CMD_SIZE);
7329 			/* Copy the mailbox extension data */
7330 			if (pmbox->out_ext_byte_len && pmbox->context2) {
7331 				lpfc_sli_pcimem_bcopy(phba->mbox_ext,
7332 						      pmbox->context2,
7333 						      pmbox->out_ext_byte_len);
7334 			}
7335 		} else {
7336 			/* First copy command data */
7337 			lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
7338 							MAILBOX_CMD_SIZE);
7339 			/* Copy the mailbox extension data */
7340 			if (pmbox->out_ext_byte_len && pmbox->context2) {
7341 				lpfc_memcpy_from_slim(pmbox->context2,
7342 					phba->MBslimaddr +
7343 					MAILBOX_HBA_EXT_OFFSET,
7344 					pmbox->out_ext_byte_len);
7345 			}
7346 		}
7347 
7348 		writel(HA_MBATT, phba->HAregaddr);
7349 		readl(phba->HAregaddr); /* flush */
7350 
7351 		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7352 		status = mbx->mbxStatus;
7353 	}
7354 
7355 	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7356 	return status;
7357 
7358 out_not_finished:
7359 	if (processing_queue) {
7360 		pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
7361 		lpfc_mbox_cmpl_put(phba, pmbox);
7362 	}
7363 	return MBX_NOT_FINISHED;
7364 }
7365 
7366 /**
7367  * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
7368  * @phba: Pointer to HBA context object.
7369  *
7370  * The function blocks the posting of SLI4 asynchronous mailbox commands from
7371  * the driver internal pending mailbox queue. It will then try to wait out the
7372  * possible outstanding mailbox command before return.
7373  * possible outstanding mailbox command before returning.
7374  * Returns:
7375  * 	0 - the outstanding mailbox command completed.
7376  * 	1 - the wait for the outstanding mailbox command timed out.
7377  **/
7378 static int
7379 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
7380 {
7381 	struct lpfc_sli *psli = &phba->sli;
7382 	int rc = 0;
7383 	unsigned long timeout = 0;
7384 
7385 	/* Mark the asynchronous mailbox command posting as blocked */
7386 	spin_lock_irq(&phba->hbalock);
7387 	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
7388 	/* Determine how long we might wait for the active mailbox
7389 	 * command to be gracefully completed by firmware.
7390 	 */
7391 	if (phba->sli.mbox_active)
7392 		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
7393 						phba->sli.mbox_active) *
7394 						1000) + jiffies;
7395 	spin_unlock_irq(&phba->hbalock);
7396 
7397 	/* Make sure the mailbox is really active */
7398 	if (timeout)
7399 		lpfc_sli4_process_missed_mbox_completions(phba);
7400 
7401 	/* Wait for the outstanding mailbox command to complete */
7402 	while (phba->sli.mbox_active) {
7403 		/* Check active mailbox complete status every 2ms */
7404 		msleep(2);
7405 		if (time_after(jiffies, timeout)) {
7406 			/* Timeout, mark the outstanding cmd as not complete */
7407 			rc = 1;
7408 			break;
7409 		}
7410 	}
7411 
7412 	/* Cannot cleanly block async mailbox command posting, fail it */
7413 	if (rc) {
7414 		spin_lock_irq(&phba->hbalock);
7415 		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7416 		spin_unlock_irq(&phba->hbalock);
7417 	}
7418 	return rc;
7419 }
7420 
7421 /**
7422  * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
7423  * @phba: Pointer to HBA context object.
7424  *
7425  * The function unblocks and resumes posting of SLI4 asynchronous mailbox
7426  * commands from the driver internal pending mailbox queue. It makes sure
7427  * that there is no outstanding mailbox command before resuming posting
7428  * asynchronous mailbox commands. If, for any reason, there is an outstanding
7429  * mailbox command, it will try to wait it out before resuming asynchronous
7430  * mailbox command posting.
7431  **/
7432 static void
7433 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
7434 {
7435 	struct lpfc_sli *psli = &phba->sli;
7436 
7437 	spin_lock_irq(&phba->hbalock);
7438 	if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7439 		/* Asynchronous mailbox posting is not blocked, do nothing */
7440 		spin_unlock_irq(&phba->hbalock);
7441 		return;
7442 	}
7443 
7444 	/* The outstanding synchronous mailbox command is guaranteed to be
7445 	 * done, either successfully or by timeout; after a timeout the
7446 	 * outstanding mailbox command is always removed, so just unblock
7447 	 * async mailbox command posting and resume.
7448 	 */
7449 	psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7450 	spin_unlock_irq(&phba->hbalock);
7451 
7452 	/* wake up worker thread to post asynchronous mailbox command */
7453 	lpfc_worker_wake_up(phba);
7454 }
7455 
7456 /**
7457  * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
7458  * @phba: Pointer to HBA context object.
7459  * @mboxq: Pointer to mailbox object.
7460  *
7461  * The function waits for the bootstrap mailbox register ready bit from
7462  * the port for up to the regular mailbox command timeout value.
7463  * Returns:
7464  *      0 - no timeout on waiting for bootstrap mailbox register ready.
7465  *      MBXERR_ERROR - wait for bootstrap mailbox register timed out.
7466  **/
7467 static int
7468 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7469 {
7470 	uint32_t db_ready;
7471 	unsigned long timeout;
7472 	struct lpfc_register bmbx_reg;
7473 
7474 	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
7475 				   * 1000) + jiffies;
7476 
7477 	do {
7478 		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
7479 		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
7480 		if (!db_ready)
7481 			msleep(2);
7482 
7483 		if (time_after(jiffies, timeout))
7484 			return MBXERR_ERROR;
7485 	} while (!db_ready);
7486 
7487 	return 0;
7488 }
7489 
7490 /**
7491  * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
7492  * @phba: Pointer to HBA context object.
7493  * @mboxq: Pointer to mailbox object.
7494  *
7495  * The function posts a mailbox to the port.  The mailbox is expected
7496  * to be completely filled in and ready for the port to operate on it.
7497  * This routine executes a synchronous completion operation on the
7498  * mailbox by polling for its completion.
7499  *
7500  * The caller must not be holding any locks when calling this routine.
7501  *
7502  * Returns:
7503  *	MBX_SUCCESS - mailbox posted successfully
7504  *	Any of the MBX error values.
7505  **/
7506 static int
7507 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7508 {
7509 	int rc = MBX_SUCCESS;
7510 	unsigned long iflag;
7511 	uint32_t mcqe_status;
7512 	uint32_t mbx_cmnd;
7513 	struct lpfc_sli *psli = &phba->sli;
7514 	struct lpfc_mqe *mb = &mboxq->u.mqe;
7515 	struct lpfc_bmbx_create *mbox_rgn;
7516 	struct dma_address *dma_address;
7517 
7518 	/*
7519 	 * Only one mailbox can be active to the bootstrap mailbox region
7520 	 * at a time and there is no queueing provided.
7521 	 */
7522 	spin_lock_irqsave(&phba->hbalock, iflag);
7523 	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7524 		spin_unlock_irqrestore(&phba->hbalock, iflag);
7525 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7526 				"(%d):2532 Mailbox command x%x (x%x/x%x) "
7527 				"cannot issue Data: x%x x%x\n",
7528 				mboxq->vport ? mboxq->vport->vpi : 0,
7529 				mboxq->u.mb.mbxCommand,
7530 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7531 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7532 				psli->sli_flag, MBX_POLL);
7533 		return MBXERR_ERROR;
7534 	}
7535 	/* The server grabs the token and owns it until release */
7536 	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7537 	phba->sli.mbox_active = mboxq;
7538 	spin_unlock_irqrestore(&phba->hbalock, iflag);
7539 
7540 	/* wait for bootstrap mbox register readiness */
7541 	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7542 	if (rc)
7543 		goto exit;
7544 
7545 	/*
7546 	 * Initialize the bootstrap memory region to avoid stale data areas
7547 	 * in the mailbox post.  Then copy the caller's mailbox contents to
7548 	 * the bmbx mailbox region.
7549 	 */
7550 	mbx_cmnd = bf_get(lpfc_mqe_command, mb);
7551 	memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
7552 	lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
7553 			      sizeof(struct lpfc_mqe));
7554 
7555 	/* Post the high mailbox dma address to the port and wait for ready. */
7556 	dma_address = &phba->sli4_hba.bmbx.dma_address;
7557 	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
7558 
7559 	/* wait for bootstrap mbox register for hi-address write done */
7560 	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7561 	if (rc)
7562 		goto exit;
7563 
7564 	/* Post the low mailbox dma address to the port. */
7565 	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
7566 
7567 	/* wait for bootstrap mbox register for low address write done */
7568 	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7569 	if (rc)
7570 		goto exit;
7571 
7572 	/*
7573 	 * Read the CQ to ensure the mailbox has completed.
7574 	 * If so, update the mailbox status so that the upper layers
7575 	 * can complete the request normally.
7576 	 */
7577 	lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
7578 			      sizeof(struct lpfc_mqe));
7579 	mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
7580 	lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
7581 			      sizeof(struct lpfc_mcqe));
7582 	mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
7583 	/*
7584 	 * When the CQE status indicates a failure and the mailbox status
7585 	 * indicates success then copy the CQE status into the mailbox status
7586 	 * (and prefix it with x4000).
7587 	 */
7588 	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
7589 		if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
7590 			bf_set(lpfc_mqe_status, mb,
7591 			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
7592 		rc = MBXERR_ERROR;
7593 	} else
7594 		lpfc_sli4_swap_str(phba, mboxq);
7595 
7596 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7597 			"(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
7598 			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
7599 			" x%x x%x CQ: x%x x%x x%x x%x\n",
7600 			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
7601 			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7602 			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7603 			bf_get(lpfc_mqe_status, mb),
7604 			mb->un.mb_words[0], mb->un.mb_words[1],
7605 			mb->un.mb_words[2], mb->un.mb_words[3],
7606 			mb->un.mb_words[4], mb->un.mb_words[5],
7607 			mb->un.mb_words[6], mb->un.mb_words[7],
7608 			mb->un.mb_words[8], mb->un.mb_words[9],
7609 			mb->un.mb_words[10], mb->un.mb_words[11],
7610 			mb->un.mb_words[12], mboxq->mcqe.word0,
7611 			mboxq->mcqe.mcqe_tag0, 	mboxq->mcqe.mcqe_tag1,
7612 			mboxq->mcqe.trailer);
7613 exit:
7614 	/* We are holding the token, no lock needed when releasing it */
7615 	spin_lock_irqsave(&phba->hbalock, iflag);
7616 	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7617 	phba->sli.mbox_active = NULL;
7618 	spin_unlock_irqrestore(&phba->hbalock, iflag);
7619 	return rc;
7620 }
7621 
7622 /**
7623  * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
7624  * @phba: Pointer to HBA context object.
7625  * @pmbox: Pointer to mailbox object.
7626  * @flag: Flag indicating how the mailbox needs to be processed.
7627  *
7628  * This function is called by discovery code and HBA management code to submit
7629  * a mailbox command to firmware with SLI-4 interface spec.
7630  *
7631  * Return codes the caller owns the mailbox command after the return of the
7632  * function.
7633  **/
7634 static int
7635 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
7636 		       uint32_t flag)
7637 {
7638 	struct lpfc_sli *psli = &phba->sli;
7639 	unsigned long iflags;
7640 	int rc;
7641 
7642 	/* dump from issue mailbox command if setup */
7643 	lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
7644 
7645 	rc = lpfc_mbox_dev_check(phba);
7646 	if (unlikely(rc)) {
7647 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7648 				"(%d):2544 Mailbox command x%x (x%x/x%x) "
7649 				"cannot issue Data: x%x x%x\n",
7650 				mboxq->vport ? mboxq->vport->vpi : 0,
7651 				mboxq->u.mb.mbxCommand,
7652 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7653 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7654 				psli->sli_flag, flag);
7655 		goto out_not_finished;
7656 	}
7657 
7658 	/* Detect polling mode and jump to a handler */
7659 	if (!phba->sli4_hba.intr_enable) {
7660 		if (flag == MBX_POLL)
7661 			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7662 		else
7663 			rc = -EIO;
7664 		if (rc != MBX_SUCCESS)
7665 			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7666 					"(%d):2541 Mailbox command x%x "
7667 					"(x%x/x%x) failure: "
7668 					"mqe_sta: x%x mcqe_sta: x%x/x%x "
7669 					"Data: x%x x%x\n",
7670 					mboxq->vport ? mboxq->vport->vpi : 0,
7671 					mboxq->u.mb.mbxCommand,
7672 					lpfc_sli_config_mbox_subsys_get(phba,
7673 									mboxq),
7674 					lpfc_sli_config_mbox_opcode_get(phba,
7675 									mboxq),
7676 					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
7677 					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
7678 					bf_get(lpfc_mcqe_ext_status,
7679 					       &mboxq->mcqe),
7680 					psli->sli_flag, flag);
7681 		return rc;
7682 	} else if (flag == MBX_POLL) {
7683 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7684 				"(%d):2542 Try to issue mailbox command "
7685 				"x%x (x%x/x%x) synchronously ahead of async "
7686 				"mailbox command queue: x%x x%x\n",
7687 				mboxq->vport ? mboxq->vport->vpi : 0,
7688 				mboxq->u.mb.mbxCommand,
7689 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7690 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7691 				psli->sli_flag, flag);
7692 		/* Try to block the asynchronous mailbox posting */
7693 		rc = lpfc_sli4_async_mbox_block(phba);
7694 		if (!rc) {
7695 			/* Successfully blocked, now issue sync mbox cmd */
7696 			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7697 			if (rc != MBX_SUCCESS)
7698 				lpfc_printf_log(phba, KERN_WARNING,
7699 					LOG_MBOX | LOG_SLI,
7700 					"(%d):2597 Sync Mailbox command "
7701 					"x%x (x%x/x%x) failure: "
7702 					"mqe_sta: x%x mcqe_sta: x%x/x%x "
7703 					"Data: x%x x%x\n",
7704 					mboxq->vport ? mboxq->vport->vpi : 0,
7705 					mboxq->u.mb.mbxCommand,
7706 					lpfc_sli_config_mbox_subsys_get(phba,
7707 									mboxq),
7708 					lpfc_sli_config_mbox_opcode_get(phba,
7709 									mboxq),
7710 					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
7711 					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
7712 					bf_get(lpfc_mcqe_ext_status,
7713 					       &mboxq->mcqe),
7714 					psli->sli_flag, flag);
7715 			/* Unblock the async mailbox posting afterward */
7716 			lpfc_sli4_async_mbox_unblock(phba);
7717 		}
7718 		return rc;
7719 	}
7720 
7721 	/* Now, interrupt mode asynchronous mailbox command */
7722 	rc = lpfc_mbox_cmd_check(phba, mboxq);
7723 	if (rc) {
7724 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7725 				"(%d):2543 Mailbox command x%x (x%x/x%x) "
7726 				"cannot issue Data: x%x x%x\n",
7727 				mboxq->vport ? mboxq->vport->vpi : 0,
7728 				mboxq->u.mb.mbxCommand,
7729 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7730 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7731 				psli->sli_flag, flag);
7732 		goto out_not_finished;
7733 	}
7734 
7735 	/* Put the mailbox command to the driver internal FIFO */
7736 	psli->slistat.mbox_busy++;
7737 	spin_lock_irqsave(&phba->hbalock, iflags);
7738 	lpfc_mbox_put(phba, mboxq);
7739 	spin_unlock_irqrestore(&phba->hbalock, iflags);
7740 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7741 			"(%d):0354 Mbox cmd issue - Enqueue Data: "
7742 			"x%x (x%x/x%x) x%x x%x x%x\n",
7743 			mboxq->vport ? mboxq->vport->vpi : 0xffffff,
7744 			bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7745 			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7746 			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7747 			phba->pport->port_state,
7748 			psli->sli_flag, MBX_NOWAIT);
7749 	/* Wake up worker thread to transport mailbox command from head */
7750 	lpfc_worker_wake_up(phba);
7751 
7752 	return MBX_BUSY;
7753 
7754 out_not_finished:
7755 	return MBX_NOT_FINISHED;
7756 }
7757 
7758 /**
7759  * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
7760  * @phba: Pointer to HBA context object.
7761  *
7762  * This function is called by worker thread to send a mailbox command to
7763  * SLI4 HBA firmware.
7764  *
7765  **/
7766 int
7767 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
7768 {
7769 	struct lpfc_sli *psli = &phba->sli;
7770 	LPFC_MBOXQ_t *mboxq;
7771 	int rc = MBX_SUCCESS;
7772 	unsigned long iflags;
7773 	struct lpfc_mqe *mqe;
7774 	uint32_t mbx_cmnd;
7775 
7776 	/* Check interrupt mode before posting async mailbox command */
7777 	if (unlikely(!phba->sli4_hba.intr_enable))
7778 		return MBX_NOT_FINISHED;
7779 
7780 	/* Check for mailbox command service token */
7781 	spin_lock_irqsave(&phba->hbalock, iflags);
7782 	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7783 		spin_unlock_irqrestore(&phba->hbalock, iflags);
7784 		return MBX_NOT_FINISHED;
7785 	}
7786 	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7787 		spin_unlock_irqrestore(&phba->hbalock, iflags);
7788 		return MBX_NOT_FINISHED;
7789 	}
7790 	if (unlikely(phba->sli.mbox_active)) {
7791 		spin_unlock_irqrestore(&phba->hbalock, iflags);
7792 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7793 				"0384 There is pending active mailbox cmd\n");
7794 		return MBX_NOT_FINISHED;
7795 	}
7796 	/* Take the mailbox command service token */
7797 	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7798 
7799 	/* Get the next mailbox command from head of queue */
7800 	mboxq = lpfc_mbox_get(phba);
7801 
7802 	/* If no more mailbox commands are waiting for post, we're done */
7803 	if (!mboxq) {
7804 		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7805 		spin_unlock_irqrestore(&phba->hbalock, iflags);
7806 		return MBX_SUCCESS;
7807 	}
7808 	phba->sli.mbox_active = mboxq;
7809 	spin_unlock_irqrestore(&phba->hbalock, iflags);
7810 
7811 	/* Check device readiness for posting mailbox command */
7812 	rc = lpfc_mbox_dev_check(phba);
7813 	if (unlikely(rc))
7814 		/* Driver clean routine will clean up pending mailbox */
7815 		goto out_not_finished;
7816 
7817 	/* Prepare the mbox command to be posted */
7818 	mqe = &mboxq->u.mqe;
7819 	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
7820 
7821 	/* Start timer for the mbox_tmo and log some mailbox post messages */
7822 	mod_timer(&psli->mbox_tmo, (jiffies +
7823 		  msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
7824 
7825 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7826 			"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
7827 			"x%x x%x\n",
7828 			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
7829 			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7830 			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7831 			phba->pport->port_state, psli->sli_flag);
7832 
7833 	if (mbx_cmnd != MBX_HEARTBEAT) {
7834 		if (mboxq->vport) {
7835 			lpfc_debugfs_disc_trc(mboxq->vport,
7836 				LPFC_DISC_TRC_MBOX_VPORT,
7837 				"MBOX Send vport: cmd:x%x mb:x%x x%x",
7838 				mbx_cmnd, mqe->un.mb_words[0],
7839 				mqe->un.mb_words[1]);
7840 		} else {
7841 			lpfc_debugfs_disc_trc(phba->pport,
7842 				LPFC_DISC_TRC_MBOX,
7843 				"MBOX Send: cmd:x%x mb:x%x x%x",
7844 				mbx_cmnd, mqe->un.mb_words[0],
7845 				mqe->un.mb_words[1]);
7846 		}
7847 	}
7848 	psli->slistat.mbox_cmd++;
7849 
7850 	/* Post the mailbox command to the port */
7851 	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
7852 	if (rc != MBX_SUCCESS) {
7853 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7854 				"(%d):2533 Mailbox command x%x (x%x/x%x) "
7855 				"cannot issue Data: x%x x%x\n",
7856 				mboxq->vport ? mboxq->vport->vpi : 0,
7857 				mboxq->u.mb.mbxCommand,
7858 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7859 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7860 				psli->sli_flag, MBX_NOWAIT);
7861 		goto out_not_finished;
7862 	}
7863 
7864 	return rc;
7865 
7866 out_not_finished:
7867 	spin_lock_irqsave(&phba->hbalock, iflags);
7868 	if (phba->sli.mbox_active) {
7869 		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
7870 		__lpfc_mbox_cmpl_put(phba, mboxq);
7871 		/* Release the token */
7872 		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7873 		phba->sli.mbox_active = NULL;
7874 	}
7875 	spin_unlock_irqrestore(&phba->hbalock, iflags);
7876 
7877 	return MBX_NOT_FINISHED;
7878 }
7879 
7880 /**
7881  * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
7882  * @phba: Pointer to HBA context object.
7883  * @pmbox: Pointer to mailbox object.
7884  * @flag: Flag indicating how the mailbox need to be processed.
7885  *
7886  * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
7887  * the API jump table function pointer from the lpfc_hba struct.
7888  *
7889  * Return codes the caller owns the mailbox command after the return of the
7890  * function.
7891  **/
7892 int
7893 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
7894 {
7895 	return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
7896 }
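
/*
 * Typical usage (sketch, mirroring callers earlier in this file): allocate
 * an LPFC_MBOXQ_t from phba->mbox_mem_pool, build the command with one of
 * the lpfc_* mailbox helpers, then issue it, e.g. polled:
 *
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *	if (rc != MBX_SUCCESS)
 *		goto fail;	// hypothetical error path
 *
 * With MBX_NOWAIT the command is queued instead and completes through its
 * mbox_cmpl callback; the ownership rules documented for the per-mode
 * routines above apply.
 */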
7897 
7898 /**
7899  * lpfc_mbox_api_table_setup - Set up mbox api function jump table
7900  * @phba: The hba struct for which this call is being executed.
7901  * @dev_grp: The HBA PCI-Device group number.
7902  *
7903  * This routine sets up the mbox interface API function jump table in @phba
7904  * struct.
7905  * Returns: 0 - success, -ENODEV - failure.
7906  **/
7907 int
7908 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7909 {
7910 
7911 	switch (dev_grp) {
7912 	case LPFC_PCI_DEV_LP:
7913 		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
7914 		phba->lpfc_sli_handle_slow_ring_event =
7915 				lpfc_sli_handle_slow_ring_event_s3;
7916 		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
7917 		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
7918 		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
7919 		break;
7920 	case LPFC_PCI_DEV_OC:
7921 		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
7922 		phba->lpfc_sli_handle_slow_ring_event =
7923 				lpfc_sli_handle_slow_ring_event_s4;
7924 		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
7925 		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
7926 		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
7927 		break;
7928 	default:
7929 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7930 				"1420 Invalid HBA PCI-device group: 0x%x\n",
7931 				dev_grp);
7932 		return -ENODEV;
7933 		break;
7934 	}
7935 	return 0;
7936 }
7937 
7938 /**
7939  * __lpfc_sli_ringtx_put - Add an iocb to the txq
7940  * @phba: Pointer to HBA context object.
7941  * @pring: Pointer to driver SLI ring object.
7942  * @piocb: Pointer to address of newly added command iocb.
7943  *
7944  * This function is called with hbalock held to add a command
7945  * iocb to the txq when SLI layer cannot submit the command iocb
7946  * to the ring.
7947  **/
7948 void
7949 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7950 		    struct lpfc_iocbq *piocb)
7951 {
7952 	lockdep_assert_held(&phba->hbalock);
7953 	/* Insert the caller's iocb in the txq tail for later processing. */
7954 	list_add_tail(&piocb->list, &pring->txq);
7955 }
7956 
7957 /**
7958  * lpfc_sli_next_iocb - Get the next iocb in the txq
7959  * @phba: Pointer to HBA context object.
7960  * @pring: Pointer to driver SLI ring object.
7961  * @piocb: Pointer to address of newly added command iocb.
7962  *
7963  * This function is called with hbalock held before a new
7964  * iocb is submitted to the firmware. This function checks the
7965  * txq so that any iocbs queued there are flushed to the firmware
7966  * before new iocbs are submitted.
7967  * If there are iocbs in the txq which need to be submitted
7968  * to firmware, lpfc_sli_next_iocb returns the first element
7969  * of the txq after dequeuing it from txq.
7970  * If there is no iocb in the txq then the function will return
7971  * *piocb and *piocb is set to NULL. Caller needs to check
7972  * *piocb to find if there are more commands in the txq.
7973  **/
7974 static struct lpfc_iocbq *
7975 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7976 		   struct lpfc_iocbq **piocb)
7977 {
7978 	struct lpfc_iocbq * nextiocb;
7979 
7980 	lockdep_assert_held(&phba->hbalock);
7981 
7982 	nextiocb = lpfc_sli_ringtx_get(phba, pring);
7983 	if (!nextiocb) {
7984 		nextiocb = *piocb;
7985 		*piocb = NULL;
7986 	}
7987 
7988 	return nextiocb;
7989 }
7990 
7991 /**
7992  * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
7993  * @phba: Pointer to HBA context object.
7994  * @ring_number: SLI ring number to issue iocb on.
7995  * @piocb: Pointer to command iocb.
7996  * @flag: Flag indicating if this command can be put into txq.
7997  *
7998  * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
7999  * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
8000  * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
8001  * flag is turned on, the function returns IOCB_ERROR. When the link is down,
8002  * this function allows only iocbs for posting buffers. This function finds
8003  * next available slot in the command ring and posts the command to the
8004  * available slot and writes the port attention register to request HBA start
8005  * processing new iocb. If there is no slot available in the ring and
8006  * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
8007  * the function returns IOCB_BUSY.
8008  *
8009  * This function is called with hbalock held. The function will return success
8010  * after it successfully submit the iocb to firmware or after adding to the
8011  * txq.
8012  **/
8013 static int
8014 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
8015 		    struct lpfc_iocbq *piocb, uint32_t flag)
8016 {
8017 	struct lpfc_iocbq *nextiocb;
8018 	IOCB_t *iocb;
8019 	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
8020 
8021 	lockdep_assert_held(&phba->hbalock);
8022 
8023 	if (piocb->iocb_cmpl && (!piocb->vport) &&
8024 	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
8025 	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
8026 		lpfc_printf_log(phba, KERN_ERR,
8027 				LOG_SLI | LOG_VPORT,
8028 				"1807 IOCB x%x failed. No vport\n",
8029 				piocb->iocb.ulpCommand);
8030 		dump_stack();
8031 		return IOCB_ERROR;
8032 	}
8033 
8034 
8035 	/* If the PCI channel is in offline state, do not post iocbs. */
8036 	if (unlikely(pci_channel_offline(phba->pcidev)))
8037 		return IOCB_ERROR;
8038 
8039 	/* If HBA has a deferred error attention, fail the iocb. */
8040 	if (unlikely(phba->hba_flag & DEFER_ERATT))
8041 		return IOCB_ERROR;
8042 
8043 	/*
8044 	 * We should never get an IOCB if we are in a < LINK_DOWN state
8045 	 */
8046 	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
8047 		return IOCB_ERROR;
8048 
8049 	/*
8050 	 * Check to see if we are blocking IOCB processing because of an
8051 	 * outstanding event.
8052 	 */
8053 	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
8054 		goto iocb_busy;
8055 
8056 	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
8057 		/*
8058 		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
8059 		 * can be issued if the link is not up.
8060 		 */
8061 		switch (piocb->iocb.ulpCommand) {
8062 		case CMD_GEN_REQUEST64_CR:
8063 		case CMD_GEN_REQUEST64_CX:
8064 			if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
8065 				(piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
8066 					FC_RCTL_DD_UNSOL_CMD) ||
8067 				(piocb->iocb.un.genreq64.w5.hcsw.Type !=
8068 					MENLO_TRANSPORT_TYPE))
8069 
8070 				goto iocb_busy;
8071 			break;
8072 		case CMD_QUE_RING_BUF_CN:
8073 		case CMD_QUE_RING_BUF64_CN:
8074 			/*
8075 			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
8076 			 * completion, iocb_cmpl MUST be 0.
8077 			 */
8078 			if (piocb->iocb_cmpl)
8079 				piocb->iocb_cmpl = NULL;
8080 			/*FALLTHROUGH*/
8081 		case CMD_CREATE_XRI_CR:
8082 		case CMD_CLOSE_XRI_CN:
8083 		case CMD_CLOSE_XRI_CX:
8084 			break;
8085 		default:
8086 			goto iocb_busy;
8087 		}
8088 
8089 	/*
8090 	 * For FCP commands, we must be in a state where we can process link
8091 	 * attention events.
8092 	 */
8093 	} else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
8094 			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
8095 		goto iocb_busy;
8096 	}
8097 
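	/* Drain any iocbs already queued on the txq first; lpfc_sli_next_iocb
	 * returns those ahead of the caller's piocb, and each is submitted
	 * while a command ring slot is available.
	 */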
8098 	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
8099 	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
8100 		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
8101 
8102 	if (iocb)
8103 		lpfc_sli_update_ring(phba, pring);
8104 	else
8105 		lpfc_sli_update_full_ring(phba, pring);
8106 
8107 	if (!piocb)
8108 		return IOCB_SUCCESS;
8109 
8110 	goto out_busy;
8111 
8112  iocb_busy:
8113 	pring->stats.iocb_cmd_delay++;
8114 
8115  out_busy:
8116 
8117 	if (!(flag & SLI_IOCB_RET_IOCB)) {
8118 		__lpfc_sli_ringtx_put(phba, pring, piocb);
8119 		return IOCB_SUCCESS;
8120 	}
8121 
8122 	return IOCB_BUSY;
8123 }
8124 
8125 /**
8126  * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
8127  * @phba: Pointer to HBA context object.
8128  * @piocb: Pointer to command iocb.
8129  * @sglq: Pointer to the scatter gather queue object.
8130  *
8131  * This routine converts the bpl or bde that is in the IOCB
8132  * to a sgl list for the sli4 hardware. The physical address
8133  * of the bpl/bde is converted back to a virtual address.
8134  * If the IOCB contains a BPL then the list of BDEs is
8135  * converted to sli4_sge entries. If the IOCB contains a single
8136  * BDE then it is converted to a single sli4_sge.
8137  * The IOCB is still in CPU endianness so the contents of
8138  * the bpl can be used without byte swapping.
8139  *
8140  * Returns valid XRI = Success, NO_XRI = Failure.
8141 **/
8142 static uint16_t
8143 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
8144 		struct lpfc_sglq *sglq)
8145 {
8146 	uint16_t xritag = NO_XRI;
8147 	struct ulp_bde64 *bpl = NULL;
8148 	struct ulp_bde64 bde;
8149 	struct sli4_sge *sgl  = NULL;
8150 	struct lpfc_dmabuf *dmabuf;
8151 	IOCB_t *icmd;
8152 	int numBdes = 0;
8153 	int i = 0;
8154 	uint32_t offset = 0; /* accumulated offset in the sg request list */
8155 	int inbound = 0; /* number of sg reply entries inbound from firmware */
8156 
8157 	if (!piocbq || !sglq)
8158 		return xritag;
8159 
8160 	sgl  = (struct sli4_sge *)sglq->sgl;
8161 	icmd = &piocbq->iocb;
8162 	if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
8163 		return sglq->sli4_xritag;
8164 	if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
8165 		numBdes = icmd->un.genreq64.bdl.bdeSize /
8166 				sizeof(struct ulp_bde64);
8167 		/* The addrHigh and addrLow fields within the IOCB
8168 		 * have not been byteswapped yet so there is no
8169 		 * need to swap them back.
8170 		 */
8171 		if (piocbq->context3)
8172 			dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
8173 		else
8174 			return xritag;
8175 
8176 		bpl  = (struct ulp_bde64 *)dmabuf->virt;
8177 		if (!bpl)
8178 			return xritag;
8179 
8180 		for (i = 0; i < numBdes; i++) {
8181 			/* Should already be byte swapped. */
8182 			sgl->addr_hi = bpl->addrHigh;
8183 			sgl->addr_lo = bpl->addrLow;
8184 
8185 			sgl->word2 = le32_to_cpu(sgl->word2);
8186 			if ((i+1) == numBdes)
8187 				bf_set(lpfc_sli4_sge_last, sgl, 1);
8188 			else
8189 				bf_set(lpfc_sli4_sge_last, sgl, 0);
8190 			/* swap the size field back to the cpu so we
8191 			 * can assign it to the sgl.
8192 			 */
8193 			bde.tus.w = le32_to_cpu(bpl->tus.w);
8194 			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
8195 			/* The offsets in the sgl need to be accumulated
8196 			 * separately for the request and reply lists.
8197 			 * The request is always first, the reply follows.
8198 			 */
8199 			if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
8200 				/* add up the reply sg entries */
8201 				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
8202 					inbound++;
8203 				/* first inbound? reset the offset */
8204 				if (inbound == 1)
8205 					offset = 0;
8206 				bf_set(lpfc_sli4_sge_offset, sgl, offset);
8207 				bf_set(lpfc_sli4_sge_type, sgl,
8208 					LPFC_SGE_TYPE_DATA);
8209 				offset += bde.tus.f.bdeSize;
8210 			}
8211 			sgl->word2 = cpu_to_le32(sgl->word2);
8212 			bpl++;
8213 			sgl++;
8214 		}
8215 	} else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
8216 			/* The addrHigh and addrLow fields of the BDE have not
8217 			 * been byteswapped yet so they need to be swapped
8218 			 * before putting them in the sgl.
8219 			 */
8220 			sgl->addr_hi =
8221 				cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
8222 			sgl->addr_lo =
8223 				cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
8224 			sgl->word2 = le32_to_cpu(sgl->word2);
8225 			bf_set(lpfc_sli4_sge_last, sgl, 1);
8226 			sgl->word2 = cpu_to_le32(sgl->word2);
8227 			sgl->sge_len =
8228 				cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
8229 	}
8230 	return sglq->sli4_xritag;
8231 }
8232 
8233 /**
8234  * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
8235  * @phba: Pointer to HBA context object.
8236  * @iocbq: Pointer to command iocb.
8237  * @wqe: Pointer to the work queue entry.
8238  *
8239  * This routine converts the iocb command to its Work Queue Entry
8240  * equivalent. The wqe pointer should not have any fields set when
8241  * this routine is called because it will memcpy over them.
8242  * This routine does not set the CQ_ID or the WQEC bits in the
8243  * wqe.
8244  *
8245  * Returns: 0 = Success, IOCB_ERROR = Failure.
8246  **/
8247 static int
8248 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
8249 		union lpfc_wqe *wqe)
8250 {
8251 	uint32_t xmit_len = 0, total_len = 0;
8252 	uint8_t ct = 0;
8253 	uint32_t fip;
8254 	uint32_t abort_tag;
8255 	uint8_t command_type = ELS_COMMAND_NON_FIP;
8256 	uint8_t cmnd;
8257 	uint16_t xritag;
8258 	uint16_t abrt_iotag;
8259 	struct lpfc_iocbq *abrtiocbq;
8260 	struct ulp_bde64 *bpl = NULL;
8261 	uint32_t els_id = LPFC_ELS_ID_DEFAULT;
8262 	int numBdes, i;
8263 	struct ulp_bde64 bde;
8264 	struct lpfc_nodelist *ndlp;
8265 	uint32_t *pcmd;
8266 	uint32_t if_type;
8267 
8268 	fip = phba->hba_flag & HBA_FIP_SUPPORT;
8269 	/* The fcp commands will set command type */
8270 	if (iocbq->iocb_flag &  LPFC_IO_FCP)
8271 		command_type = FCP_COMMAND;
8272 	else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
8273 		command_type = ELS_COMMAND_FIP;
8274 	else
8275 		command_type = ELS_COMMAND_NON_FIP;
8276 
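	/*
	 * When the FCP command is embedded in the WQE (fcp_embed_io), the
	 * full 128-byte WQE is used, so clear all of it before the 64-byte
	 * IOCB image is copied over the first half below.
	 */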
8277 	if (phba->fcp_embed_io)
8278 		memset(wqe, 0, sizeof(union lpfc_wqe128));
8279 	/* Some of the fields are in the right position already */
8280 	memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
8281 	wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
8282 	wqe->generic.wqe_com.word10 = 0;
8283 
8284 	abort_tag = (uint32_t) iocbq->iotag;
8285 	xritag = iocbq->sli4_xritag;
8286 	/* words0-2 bpl convert bde */
8287 	if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
8288 		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8289 				sizeof(struct ulp_bde64);
8290 		bpl  = (struct ulp_bde64 *)
8291 			((struct lpfc_dmabuf *)iocbq->context3)->virt;
8292 		if (!bpl)
8293 			return IOCB_ERROR;
8294 
8295 		/* Should already be byte swapped. */
8296 		wqe->generic.bde.addrHigh =  le32_to_cpu(bpl->addrHigh);
8297 		wqe->generic.bde.addrLow =  le32_to_cpu(bpl->addrLow);
8298 		/* swap the size field back to the cpu so we
8299 		 * can assign it to the sgl.
8300 		 */
8301 		wqe->generic.bde.tus.w  = le32_to_cpu(bpl->tus.w);
8302 		xmit_len = wqe->generic.bde.tus.f.bdeSize;
8303 		total_len = 0;
8304 		for (i = 0; i < numBdes; i++) {
8305 			bde.tus.w  = le32_to_cpu(bpl[i].tus.w);
8306 			total_len += bde.tus.f.bdeSize;
8307 		}
8308 	} else
8309 		xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
8310 
8311 	iocbq->iocb.ulpIoTag = iocbq->iotag;
8312 	cmnd = iocbq->iocb.ulpCommand;
8313 
8314 	switch (iocbq->iocb.ulpCommand) {
8315 	case CMD_ELS_REQUEST64_CR:
8316 		if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
8317 			ndlp = iocbq->context_un.ndlp;
8318 		else
8319 			ndlp = (struct lpfc_nodelist *)iocbq->context1;
8320 		if (!iocbq->iocb.ulpLe) {
8321 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8322 				"2007 Only Limited Edition cmd Format"
8323 				" supported 0x%x\n",
8324 				iocbq->iocb.ulpCommand);
8325 			return IOCB_ERROR;
8326 		}
8327 
8328 		wqe->els_req.payload_len = xmit_len;
8329 		/* Els_request64 has a TMO */
8330 		bf_set(wqe_tmo, &wqe->els_req.wqe_com,
8331 			iocbq->iocb.ulpTimeout);
8332 		/* Need a VF for word 4 set the vf bit*/
8333 		bf_set(els_req64_vf, &wqe->els_req, 0);
8334 		/* And a VFID for word 12 */
8335 		bf_set(els_req64_vfid, &wqe->els_req, 0);
8336 		ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
8337 		bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8338 		       iocbq->iocb.ulpContext);
8339 		bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
8340 		bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
8341 		/* CCP CCPE PV PRI in word10 were set in the memcpy */
8342 		if (command_type == ELS_COMMAND_FIP)
8343 			els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
8344 					>> LPFC_FIP_ELS_ID_SHIFT);
8345 		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8346 					iocbq->context2)->virt);
8347 		if_type = bf_get(lpfc_sli_intf_if_type,
8348 					&phba->sli4_hba.sli_intf);
8349 		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
8350 			if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
8351 				*pcmd == ELS_CMD_SCR ||
8352 				*pcmd == ELS_CMD_FDISC ||
8353 				*pcmd == ELS_CMD_LOGO ||
8354 				*pcmd == ELS_CMD_PLOGI)) {
8355 				bf_set(els_req64_sp, &wqe->els_req, 1);
8356 				bf_set(els_req64_sid, &wqe->els_req,
8357 					iocbq->vport->fc_myDID);
8358 				if ((*pcmd == ELS_CMD_FLOGI) &&
8359 					!(phba->fc_topology ==
8360 						LPFC_TOPOLOGY_LOOP))
8361 					bf_set(els_req64_sid, &wqe->els_req, 0);
8362 				bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
8363 				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8364 					phba->vpi_ids[iocbq->vport->vpi]);
8365 			} else if (pcmd && iocbq->context1) {
8366 				bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
8367 				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8368 					phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8369 			}
8370 		}
8371 		bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
8372 		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8373 		bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
8374 		bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
8375 		bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
8376 		bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
8377 		bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8378 		bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
8379 		wqe->els_req.max_response_payload_len = total_len - xmit_len;
8380 		break;
8381 	case CMD_XMIT_SEQUENCE64_CX:
8382 		bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
8383 		       iocbq->iocb.un.ulpWord[3]);
8384 		bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
8385 		       iocbq->iocb.unsli3.rcvsli3.ox_id);
8386 		/* The entire sequence is transmitted for this IOCB */
8387 		xmit_len = total_len;
8388 		cmnd = CMD_XMIT_SEQUENCE64_CR;
8389 		if (phba->link_flag & LS_LOOPBACK_MODE)
8390 			bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
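		/* fall through */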
8391 	case CMD_XMIT_SEQUENCE64_CR:
8392 		/* word3 iocb=io_tag32 wqe=reserved */
8393 		wqe->xmit_sequence.rsvd3 = 0;
8394 		/* word4 relative_offset memcpy */
8395 		/* word5 r_ctl/df_ctl memcpy */
8396 		bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
8397 		bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
8398 		bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
8399 		       LPFC_WQE_IOD_WRITE);
8400 		bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
8401 		       LPFC_WQE_LENLOC_WORD12);
8402 		bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
8403 		wqe->xmit_sequence.xmit_len = xmit_len;
8404 		command_type = OTHER_COMMAND;
8405 		break;
8406 	case CMD_XMIT_BCAST64_CN:
8407 		/* word3 iocb=iotag32 wqe=seq_payload_len */
8408 		wqe->xmit_bcast64.seq_payload_len = xmit_len;
8409 		/* word4 iocb=rsvd wqe=rsvd */
8410 		/* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
8411 		/* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
8412 		bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
8413 			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8414 		bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
8415 		bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
8416 		bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
8417 		       LPFC_WQE_LENLOC_WORD3);
8418 		bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
8419 		break;
8420 	case CMD_FCP_IWRITE64_CR:
8421 		command_type = FCP_COMMAND_DATA_OUT;
8422 		/* word3 iocb=iotag wqe=payload_offset_len */
8423 		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8424 		bf_set(payload_offset_len, &wqe->fcp_iwrite,
8425 		       xmit_len + sizeof(struct fcp_rsp));
8426 		bf_set(cmd_buff_len, &wqe->fcp_iwrite,
8427 		       0);
8428 		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
8429 		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
8430 		bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
8431 		       iocbq->iocb.ulpFCP2Rcvy);
8432 		bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
8433 		/* Always open the exchange */
8434 		bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
8435 		bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
8436 		       LPFC_WQE_LENLOC_WORD4);
8437 		bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
8438 		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
8439 		if (iocbq->iocb_flag & LPFC_IO_OAS) {
8440 			bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
8441 			bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
8442 			if (iocbq->priority) {
8443 				bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
8444 				       (iocbq->priority << 1));
8445 			} else {
8446 				bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
8447 				       (phba->cfg_XLanePriority << 1));
8448 			}
8449 		}
8450 		/* Note, word 10 is already initialized to 0 */
8451 
8452 		if (phba->fcp_embed_io) {
8453 			struct lpfc_scsi_buf *lpfc_cmd;
8454 			struct sli4_sge *sgl;
8455 			union lpfc_wqe128 *wqe128;
8456 			struct fcp_cmnd *fcp_cmnd;
8457 			uint32_t *ptr;
8458 
8459 			/* 128 byte wqe support here */
8460 			wqe128 = (union lpfc_wqe128 *)wqe;
8461 
8462 			lpfc_cmd = iocbq->context1;
8463 			sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
8464 			fcp_cmnd = lpfc_cmd->fcp_cmnd;
8465 
8466 			/* Word 0-2 - FCP_CMND */
8467 			wqe128->generic.bde.tus.f.bdeFlags =
8468 				BUFF_TYPE_BDE_IMMED;
8469 			wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
8470 			wqe128->generic.bde.addrHigh = 0;
8471 			wqe128->generic.bde.addrLow =  88;  /* Word 22 */
8472 
8473 			bf_set(wqe_wqes, &wqe128->fcp_iwrite.wqe_com, 1);
8474 
8475 			/* Word 22-29  FCP CMND Payload */
8476 			ptr = &wqe128->words[22];
8477 			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
8478 		}
8479 		break;
8480 	case CMD_FCP_IREAD64_CR:
8481 		/* word3 iocb=iotag wqe=payload_offset_len */
8482 		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8483 		bf_set(payload_offset_len, &wqe->fcp_iread,
8484 		       xmit_len + sizeof(struct fcp_rsp));
8485 		bf_set(cmd_buff_len, &wqe->fcp_iread,
8486 		       0);
8487 		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
8488 		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
8489 		bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
8490 		       iocbq->iocb.ulpFCP2Rcvy);
8491 		bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
8492 		/* Always open the exchange */
8493 		bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
8494 		bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
8495 		       LPFC_WQE_LENLOC_WORD4);
8496 		bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
8497 		bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
8498 		if (iocbq->iocb_flag & LPFC_IO_OAS) {
8499 			bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
8500 			bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
8501 			if (iocbq->priority) {
8502 				bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
8503 				       (iocbq->priority << 1));
8504 			} else {
8505 				bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
8506 				       (phba->cfg_XLanePriority << 1));
8507 			}
8508 		}
8509 		/* Note, word 10 is already initialized to 0 */
8510 
8511 		if (phba->fcp_embed_io) {
8512 			struct lpfc_scsi_buf *lpfc_cmd;
8513 			struct sli4_sge *sgl;
8514 			union lpfc_wqe128 *wqe128;
8515 			struct fcp_cmnd *fcp_cmnd;
8516 			uint32_t *ptr;
8517 
8518 			/* 128 byte wqe support here */
8519 			wqe128 = (union lpfc_wqe128 *)wqe;
8520 
8521 			lpfc_cmd = iocbq->context1;
8522 			sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
8523 			fcp_cmnd = lpfc_cmd->fcp_cmnd;
8524 
8525 			/* Word 0-2 - FCP_CMND */
8526 			wqe128->generic.bde.tus.f.bdeFlags =
8527 				BUFF_TYPE_BDE_IMMED;
8528 			wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
8529 			wqe128->generic.bde.addrHigh = 0;
8530 			wqe128->generic.bde.addrLow =  88;  /* Word 22 */
8531 
8532 			bf_set(wqe_wqes, &wqe128->fcp_iread.wqe_com, 1);
8533 
8534 			/* Word 22-29  FCP CMND Payload */
8535 			ptr = &wqe128->words[22];
8536 			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
8537 		}
8538 		break;
8539 	case CMD_FCP_ICMND64_CR:
8540 		/* word3 iocb=iotag wqe=payload_offset_len */
8541 		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8542 		bf_set(payload_offset_len, &wqe->fcp_icmd,
8543 		       xmit_len + sizeof(struct fcp_rsp));
8544 		bf_set(cmd_buff_len, &wqe->fcp_icmd,
8545 		       0);
8546 		/* word3 iocb=IO_TAG wqe=reserved */
8547 		bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
8548 		/* Always open the exchange */
8549 		bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
8550 		bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
8551 		bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
8552 		bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
8553 		       LPFC_WQE_LENLOC_NONE);
8554 		bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
8555 		       iocbq->iocb.ulpFCP2Rcvy);
8556 		if (iocbq->iocb_flag & LPFC_IO_OAS) {
8557 			bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
8558 			bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
8559 			if (iocbq->priority) {
8560 				bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
8561 				       (iocbq->priority << 1));
8562 			} else {
8563 				bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
8564 				       (phba->cfg_XLanePriority << 1));
8565 			}
8566 		}
8567 		/* Note, word 10 is already initialized to 0 */
8568 
8569 		if (phba->fcp_embed_io) {
8570 			struct lpfc_scsi_buf *lpfc_cmd;
8571 			struct sli4_sge *sgl;
8572 			union lpfc_wqe128 *wqe128;
8573 			struct fcp_cmnd *fcp_cmnd;
8574 			uint32_t *ptr;
8575 
8576 			/* 128 byte wqe support here */
8577 			wqe128 = (union lpfc_wqe128 *)wqe;
8578 
8579 			lpfc_cmd = iocbq->context1;
8580 			sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
8581 			fcp_cmnd = lpfc_cmd->fcp_cmnd;
8582 
8583 			/* Word 0-2 - FCP_CMND */
8584 			wqe128->generic.bde.tus.f.bdeFlags =
8585 				BUFF_TYPE_BDE_IMMED;
8586 			wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
8587 			wqe128->generic.bde.addrHigh = 0;
8588 			wqe128->generic.bde.addrLow =  88;  /* Word 22 */
8589 
8590 			bf_set(wqe_wqes, &wqe128->fcp_icmd.wqe_com, 1);
8591 
8592 			/* Word 22-29  FCP CMND Payload */
8593 			ptr = &wqe128->words[22];
8594 			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
8595 		}
8596 		break;
8597 	case CMD_GEN_REQUEST64_CR:
8598 		/* For this command calculate the xmit length of the
8599 		 * request bde.
8600 		 */
8601 		xmit_len = 0;
8602 		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8603 			sizeof(struct ulp_bde64);
8604 		for (i = 0; i < numBdes; i++) {
8605 			bde.tus.w = le32_to_cpu(bpl[i].tus.w);
8606 			if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
8607 				break;
8608 			xmit_len += bde.tus.f.bdeSize;
8609 		}
8610 		/* word3 iocb=IO_TAG wqe=request_payload_len */
8611 		wqe->gen_req.request_payload_len = xmit_len;
8612 		/* word4 iocb=parameter wqe=relative_offset memcpy */
8613 		/* word5 [rctl, type, df_ctl, la] copied in memcpy */
8614 		/* word6 context tag copied in memcpy */
8615 		if (iocbq->iocb.ulpCt_h  || iocbq->iocb.ulpCt_l) {
8616 			ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
8617 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8618 				"2015 Invalid CT %x command 0x%x\n",
8619 				ct, iocbq->iocb.ulpCommand);
8620 			return IOCB_ERROR;
8621 		}
8622 		bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
8623 		bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
8624 		bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
8625 		bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
8626 		bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
8627 		bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
8628 		bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8629 		bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
8630 		wqe->gen_req.max_response_payload_len = total_len - xmit_len;
8631 		command_type = OTHER_COMMAND;
8632 		break;
8633 	case CMD_XMIT_ELS_RSP64_CX:
8634 		ndlp = (struct lpfc_nodelist *)iocbq->context1;
8635 		/* words0-2 BDE memcpy */
8636 		/* word3 iocb=iotag32 wqe=response_payload_len */
8637 		wqe->xmit_els_rsp.response_payload_len = xmit_len;
8638 		/* word4 */
8639 		wqe->xmit_els_rsp.word4 = 0;
8640 		/* word5 iocb=rsvd wqe=did */
8641 		bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
8642 			 iocbq->iocb.un.xseq64.xmit_els_remoteID);
8643 
8644 		if_type = bf_get(lpfc_sli_intf_if_type,
8645 					&phba->sli4_hba.sli_intf);
8646 		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
8647 			if (iocbq->vport->fc_flag & FC_PT2PT) {
8648 				bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
8649 				bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
8650 					iocbq->vport->fc_myDID);
8651 				if (iocbq->vport->fc_myDID == Fabric_DID) {
8652 					bf_set(wqe_els_did,
8653 						&wqe->xmit_els_rsp.wqe_dest, 0);
8654 				}
8655 			}
8656 		}
8657 		bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
8658 		       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8659 		bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
8660 		bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
8661 		       iocbq->iocb.unsli3.rcvsli3.ox_id);
8662 		if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
8663 			bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
8664 			       phba->vpi_ids[iocbq->vport->vpi]);
8665 		bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
8666 		bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
8667 		bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
8668 		bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
8669 		       LPFC_WQE_LENLOC_WORD3);
8670 		bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
8671 		bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
8672 		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8673 		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8674 					iocbq->context2)->virt);
8675 		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
8676 				bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
8677 				bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
8678 					iocbq->vport->fc_myDID);
8679 				bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
8680 				bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
8681 					phba->vpi_ids[phba->pport->vpi]);
8682 		}
8683 		command_type = OTHER_COMMAND;
8684 		break;
8685 	case CMD_CLOSE_XRI_CN:
8686 	case CMD_ABORT_XRI_CN:
8687 	case CMD_ABORT_XRI_CX:
8688 		/* words 0-2 memcpy should be 0 (reserved) */
8689 		/* port will send abts */
8690 		abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
8691 		if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
8692 			abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
8693 			fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
8694 		} else
8695 			fip = 0;
8696 
8697 		if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
8698 			/*
8699 			 * The link is down, or the command was ELS_FIP
8700 			 * so the fw does not need to send abts
8701 			 * on the wire.
8702 			 */
8703 			bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
8704 		else
8705 			bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
8706 		bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
8707 		/* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
8708 		wqe->abort_cmd.rsrvd5 = 0;
8709 		bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
8710 			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8711 		abort_tag = iocbq->iocb.un.acxri.abortIoTag;
8712 		/*
8713 		 * The abort handler will send us CMD_ABORT_XRI_CN or
8714 		 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
8715 		 */
8716 		bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
8717 		bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
8718 		bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
8719 		       LPFC_WQE_LENLOC_NONE);
8720 		cmnd = CMD_ABORT_XRI_CX;
8721 		command_type = OTHER_COMMAND;
8722 		xritag = 0;
8723 		break;
8724 	case CMD_XMIT_BLS_RSP64_CX:
8725 		ndlp = (struct lpfc_nodelist *)iocbq->context1;
8726 		/* As BLS ABTS RSP WQE is very different from other WQEs,
8727 		 * we reconstruct this WQE here from scratch, based on
8728 		 * information in the iocbq.
8729 		 */
8730 		memset(wqe, 0, sizeof(union lpfc_wqe));
8731 		/* OX_ID is the same regardless of who sent ABTS to CT exchange */
8732 		bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
8733 		       bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
8734 		if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
8735 		    LPFC_ABTS_UNSOL_INT) {
8736 			/* ABTS sent by initiator to CT exchange, the
8737 			 * RX_ID field will be filled with the newly
8738 			 * allocated responder XRI.
8739 			 */
8740 			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
8741 			       iocbq->sli4_xritag);
8742 		} else {
8743 			/* ABTS sent by responder to CT exchange, the
8744 			 * RX_ID field will be filled with the responder
8745 			 * RX_ID from ABTS.
8746 			 */
8747 			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
8748 			       bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
8749 		}
8750 		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
8751 		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
8752 
8753 		/* Use CT=VPI */
8754 		bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
8755 			ndlp->nlp_DID);
8756 		bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
8757 			iocbq->iocb.ulpContext);
8758 		bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
8759 		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
8760 			phba->vpi_ids[phba->pport->vpi]);
8761 		bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
8762 		bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
8763 		       LPFC_WQE_LENLOC_NONE);
8764 		/* Overwrite the pre-set command type with OTHER_COMMAND */
8765 		command_type = OTHER_COMMAND;
8766 		if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
8767 			bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
8768 			       bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
8769 			bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
8770 			       bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
8771 			bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
8772 			       bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
8773 		}
8774 
8775 		break;
8776 	case CMD_XRI_ABORTED_CX:
8777 	case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
8778 	case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
8779 	case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
8780 	case CMD_FCP_TRSP64_CX: /* Target mode rcv */
8781 	case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
8782 	default:
8783 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8784 				"2014 Invalid command 0x%x\n",
8785 				iocbq->iocb.ulpCommand);
8786 		return IOCB_ERROR;
8787 		break;
8788 	}
8789 
8790 	if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
8791 		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
8792 	else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
8793 		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
8794 	else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
8795 		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
8796 	iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
8797 			      LPFC_IO_DIF_INSERT);
8798 	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
8799 	bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
8800 	wqe->generic.wqe_com.abort_tag = abort_tag;
8801 	bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
8802 	bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
8803 	bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
8804 	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
8805 	return 0;
8806 }
8807 
8808 /**
8809  * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
8810  * @phba: Pointer to HBA context object.
8811  * @ring_number: SLI ring number to issue iocb on.
8812  * @piocb: Pointer to command iocb.
8813  * @flag: Flag indicating if this command can be put into txq.
8814  *
8815  * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
8816  * an iocb command to an HBA with SLI-4 interface spec.
8817  *
8818  * This function is called with hbalock held. The function will return success
8819  * after it successfully submits the iocb to the firmware or after adding it
8820  * to the txq.
8821  **/
8822 static int
8823 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8824 			 struct lpfc_iocbq *piocb, uint32_t flag)
8825 {
8826 	struct lpfc_sglq *sglq;
8827 	union lpfc_wqe *wqe;
8828 	union lpfc_wqe128 wqe128;
8829 	struct lpfc_queue *wq;
8830 	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
8831 
8832 	lockdep_assert_held(&phba->hbalock);
8833 
8834 	/*
8835 	 * The WQE can be either 64 or 128 bytes,
8836 	 * so allocate space on the stack assuming the largest.
8837 	 */
8838 	wqe = (union lpfc_wqe *)&wqe128;
8839 
8840 	if (piocb->sli4_xritag == NO_XRI) {
8841 		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
8842 		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
8843 			sglq = NULL;
8844 		else {
8845 			if (!list_empty(&pring->txq)) {
8846 				if (!(flag & SLI_IOCB_RET_IOCB)) {
8847 					__lpfc_sli_ringtx_put(phba,
8848 						pring, piocb);
8849 					return IOCB_SUCCESS;
8850 				} else {
8851 					return IOCB_BUSY;
8852 				}
8853 			} else {
8854 				sglq = __lpfc_sli_get_sglq(phba, piocb);
8855 				if (!sglq) {
8856 					if (!(flag & SLI_IOCB_RET_IOCB)) {
8857 						__lpfc_sli_ringtx_put(phba,
8858 								pring,
8859 								piocb);
8860 						return IOCB_SUCCESS;
8861 					} else
8862 						return IOCB_BUSY;
8863 				}
8864 			}
8865 		}
8866 	} else if (piocb->iocb_flag &  LPFC_IO_FCP) {
8867 		/* These IO's already have an XRI and a mapped sgl. */
8868 		sglq = NULL;
8869 	} else {
8870 		/*
8871 		 * This is a continuation of a command (CX), so this
8872 		 * sglq is on the active list
8873 		 */
8874 		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
8875 		if (!sglq)
8876 			return IOCB_ERROR;
8877 	}
8878 
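	/*
	 * If a sglq was assigned, record its XRI tags in the iocb and convert
	 * the IOCB's BPL/BDE into the SGL format the SLI4 hardware expects.
	 */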
8879 	if (sglq) {
8880 		piocb->sli4_lxritag = sglq->sli4_lxritag;
8881 		piocb->sli4_xritag = sglq->sli4_xritag;
8882 		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
8883 			return IOCB_ERROR;
8884 	}
8885 
8886 	if (lpfc_sli4_iocb2wqe(phba, piocb, wqe))
8887 		return IOCB_ERROR;
8888 
8889 	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
8890 	    (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
8891 		if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) {
8892 			wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx];
8893 		} else {
8894 			wq = phba->sli4_hba.oas_wq;
8895 		}
8896 		if (lpfc_sli4_wq_put(wq, wqe))
8897 			return IOCB_ERROR;
8898 	} else {
8899 		if (unlikely(!phba->sli4_hba.els_wq))
8900 			return IOCB_ERROR;
8901 		if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe))
8902 			return IOCB_ERROR;
8903 	}
8904 	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
8905 
8906 	return 0;
8907 }
8908 
8909 /**
8910  * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
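 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.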
8911  *
8912  * This routine wraps the actual lockless IOCB issue routine, invoking it
8913  * through the function pointer stored in the lpfc_hba struct.
8914  *
8915  * Return codes:
8916  * IOCB_ERROR - Error
8917  * IOCB_SUCCESS - Success
8918  * IOCB_BUSY - Busy
8919  **/
8920 int
8921 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8922 		struct lpfc_iocbq *piocb, uint32_t flag)
8923 {
8924 	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
8925 }
8926 
8927 /**
8928  * lpfc_sli_api_table_setup - Set up sli api function jump table
8929  * @phba: The hba struct for which this call is being executed.
8930  * @dev_grp: The HBA PCI-Device group number.
8931  *
8932  * This routine sets up the SLI interface API function jump table in @phba
8933  * struct.
8934  * Returns: 0 - success, -ENODEV - failure.
8935  **/
8936 int
8937 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8938 {
8939 
8940 	switch (dev_grp) {
8941 	case LPFC_PCI_DEV_LP:
8942 		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
8943 		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
8944 		break;
8945 	case LPFC_PCI_DEV_OC:
8946 		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
8947 		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
8948 		break;
8949 	default:
8950 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8951 				"1419 Invalid HBA PCI-device group: 0x%x\n",
8952 				dev_grp);
8953 		return -ENODEV;
8954 		break;
8955 	}
8956 	phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
8957 	return 0;
8958 }
8959 
8960 /**
8961  * lpfc_sli_calc_ring - Calculates which ring to use
8962  * @phba: Pointer to HBA context object.
8963  * @ring_number: Initial ring
8964  * @piocb: Pointer to command iocb.
8965  *
8966  * For SLI4, FCP IO can be deferred to one of many WQs, based on
8967  * fcp_wqidx, thus we need to calculate the corresponding ring.
8968  * Since ABORTS must go on the same WQ as the command they are
8969  * aborting, we use the command's fcp_wqidx.
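 *
 * Returns the ring number to use, or LPFC_HBA_ERROR if the required
 * work queue has not been set up.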
8970  */
8971 int
8972 lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number,
8973 		    struct lpfc_iocbq *piocb)
8974 {
8975 	if (phba->sli_rev < LPFC_SLI_REV4)
8976 		return ring_number;
8977 
8978 	if (piocb->iocb_flag &  (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
8979 		if (!(phba->cfg_fof) ||
8980 				(!(piocb->iocb_flag & LPFC_IO_FOF))) {
8981 			if (unlikely(!phba->sli4_hba.fcp_wq))
8982 				return LPFC_HBA_ERROR;
8983 			/*
8984 			 * for an abort iocb, fcp_wqidx should already
8985 			 * be set up based on what work queue we used.
8986 			 */
8987 			if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX))
8988 				piocb->fcp_wqidx =
8989 					lpfc_sli4_scmd_to_wqidx_distr(phba,
8990 							      piocb->context1);
8991 			ring_number = MAX_SLI3_CONFIGURED_RINGS +
8992 				piocb->fcp_wqidx;
8993 		} else {
8994 			if (unlikely(!phba->sli4_hba.oas_wq))
8995 				return LPFC_HBA_ERROR;
8996 			piocb->fcp_wqidx = 0;
8997 			ring_number =  LPFC_FCP_OAS_RING;
8998 		}
8999 	}
9000 	return ring_number;
9001 }
9002 
9003 /**
9004  * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
9005  * @phba: Pointer to HBA context object.
9006  * @ring_number: SLI ring number to issue iocb on.
9007  * @piocb: Pointer to command iocb.
9008  * @flag: Flag indicating if this command can be put into txq.
9009  *
9010  * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
9011  * function. This function acquires the appropriate lock (the ring_lock for
9012  * SLI4, the hbalock otherwise), calls __lpfc_sli_issue_iocb, and returns
9013  * whatever error __lpfc_sli_issue_iocb returns. This wrapper is used by
9014  * functions which do not already hold the lock.
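 *
 * A typical caller (illustrative sketch only; "elsiocb" here stands for a
 * previously prepared ELS iocbq and is not defined in this function) might do:
 *
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 *	if (rc == IOCB_ERROR)
 *		lpfc_els_free_iocb(phba, elsiocb);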
9015  **/
9016 int
9017 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9018 		    struct lpfc_iocbq *piocb, uint32_t flag)
9019 {
9020 	struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
9021 	struct lpfc_sli_ring *pring;
9022 	struct lpfc_queue *fpeq;
9023 	struct lpfc_eqe *eqe;
9024 	unsigned long iflags;
9025 	int rc, idx;
9026 
9027 	if (phba->sli_rev == LPFC_SLI_REV4) {
9028 		ring_number = lpfc_sli_calc_ring(phba, ring_number, piocb);
9029 		if (unlikely(ring_number == LPFC_HBA_ERROR))
9030 			return IOCB_ERROR;
9031 		idx = piocb->fcp_wqidx;
9032 
9033 		pring = &phba->sli.ring[ring_number];
9034 		spin_lock_irqsave(&pring->ring_lock, iflags);
9035 		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9036 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
9037 
9038 		if (lpfc_fcp_look_ahead && (piocb->iocb_flag &  LPFC_IO_FCP)) {
9039 			fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx];
9040 
9041 			if (atomic_dec_and_test(&fcp_eq_hdl->
9042 				fcp_eq_in_use)) {
9043 
9044 				/* Get associated EQ with this index */
9045 				fpeq = phba->sli4_hba.hba_eq[idx];
9046 
9047 				/* Turn off interrupts from this EQ */
9048 				lpfc_sli4_eq_clr_intr(fpeq);
9049 
9050 				/*
9051 				 * Process all the events on FCP EQ
9052 				 */
9053 				while ((eqe = lpfc_sli4_eq_get(fpeq))) {
9054 					lpfc_sli4_hba_handle_eqe(phba,
9055 						eqe, idx);
9056 					fpeq->EQ_processed++;
9057 				}
9058 
9059 				/* Always clear and re-arm the EQ */
9060 				lpfc_sli4_eq_release(fpeq,
9061 					LPFC_QUEUE_REARM);
9062 			}
9063 			atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
9064 		}
9065 	} else {
9066 		/* For now, SLI2/3 will still use hbalock */
9067 		spin_lock_irqsave(&phba->hbalock, iflags);
9068 		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9069 		spin_unlock_irqrestore(&phba->hbalock, iflags);
9070 	}
9071 	return rc;
9072 }
9073 
9074 /**
9075  * lpfc_extra_ring_setup - Extra ring setup function
9076  * @phba: Pointer to HBA context object.
9077  *
9078  * This function is called while driver attaches with the
9079  * HBA to setup the extra ring. The extra ring is used
9080  * only when the driver needs to support target mode functionality
9081  * or IP over FC functionality.
9082  *
9083  * This function is called with no lock held.
9084  **/
9085 static int
9086 lpfc_extra_ring_setup( struct lpfc_hba *phba)
9087 {
9088 	struct lpfc_sli *psli;
9089 	struct lpfc_sli_ring *pring;
9090 
9091 	psli = &phba->sli;
9092 
9093 	/* Adjust cmd/rsp ring iocb entries more evenly */
9094 
9095 	/* Take some away from the FCP ring */
9096 	pring = &psli->ring[psli->fcp_ring];
9097 	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9098 	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9099 	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9100 	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
9101 
9102 	/* and give them to the extra ring */
9103 	pring = &psli->ring[psli->extra_ring];
9104 
9105 	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9106 	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9107 	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9108 	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
9109 
9110 	/* Setup default profile for this ring */
9111 	pring->iotag_max = 4096;
9112 	pring->num_mask = 1;
9113 	pring->prt[0].profile = 0;      /* Mask 0 */
9114 	pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
9115 	pring->prt[0].type = phba->cfg_multi_ring_type;
9116 	pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
9117 	return 0;
9118 }
9119 
9120 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
9121  * @phba: Pointer to HBA context object.
9122  * @iocbq: Pointer to iocb object.
9123  *
9124  * The async_event handler calls this routine when it receives
9125  * an ASYNC_STATUS_CN event from the port.  The port generates
9126  * this event when an Abort Sequence request to an rport fails
9127  * twice in succession.  The abort could be originated by the
9128  * driver or by the port.  The ABTS could have been for an ELS
9129  * or FCP IO.  The port only generates this event when an ABTS
9130  * fails to complete after one retry.
9131  */
9132 static void
9133 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
9134 			  struct lpfc_iocbq *iocbq)
9135 {
9136 	struct lpfc_nodelist *ndlp = NULL;
9137 	uint16_t rpi = 0, vpi = 0;
9138 	struct lpfc_vport *vport = NULL;
9139 
9140 	/* The rpi in the ulpContext is vport-sensitive. */
9141 	vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
9142 	rpi = iocbq->iocb.ulpContext;
9143 
9144 	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9145 			"3092 Port generated ABTS async event "
9146 			"on vpi %d rpi %d status 0x%x\n",
9147 			vpi, rpi, iocbq->iocb.ulpStatus);
9148 
9149 	vport = lpfc_find_vport_by_vpid(phba, vpi);
9150 	if (!vport)
9151 		goto err_exit;
9152 	ndlp = lpfc_findnode_rpi(vport, rpi);
9153 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
9154 		goto err_exit;
9155 
9156 	if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
9157 		lpfc_sli_abts_recover_port(vport, ndlp);
9158 	return;
9159 
9160  err_exit:
9161 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9162 			"3095 Event Context not found, no "
9163 			"action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
9164 			iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
9165 			vpi, rpi);
9166 }
9167 
9168 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
9169  * @phba: pointer to HBA context object.
9170  * @ndlp: nodelist pointer for the impacted rport.
9171  * @axri: pointer to the wcqe containing the failed exchange.
9172  *
9173  * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
9174  * port.  The port generates this event when an abort exchange request to an
9175  * rport fails twice in succession with no reply.  The abort could be originated
9176  * by the driver or by the port.  The ABTS could have been for an ELS or FCP IO.
9177  */
9178 void
9179 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
9180 			   struct lpfc_nodelist *ndlp,
9181 			   struct sli4_wcqe_xri_aborted *axri)
9182 {
9183 	struct lpfc_vport *vport;
9184 	uint32_t ext_status = 0;
9185 
9186 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
9187 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9188 				"3115 Node Context not found, driver "
9189 				"ignoring abts err event\n");
9190 		return;
9191 	}
9192 
9193 	vport = ndlp->vport;
9194 	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9195 			"3116 Port generated FCP XRI ABORT event on "
9196 			"vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
9197 			ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
9198 			bf_get(lpfc_wcqe_xa_xri, axri),
9199 			bf_get(lpfc_wcqe_xa_status, axri),
9200 			axri->parameter);
9201 
9202 	/*
9203 	 * Catch the ABTS protocol failure case.  Older OCe FW releases returned
9204 	 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
9205 	 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
9206 	 */
9207 	ext_status = axri->parameter & IOERR_PARAM_MASK;
9208 	if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
9209 	    ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
9210 		lpfc_sli_abts_recover_port(vport, ndlp);
9211 }
9212 
9213 /**
9214  * lpfc_sli_async_event_handler - ASYNC iocb handler function
9215  * @phba: Pointer to HBA context object.
9216  * @pring: Pointer to driver SLI ring object.
9217  * @iocbq: Pointer to iocb object.
9218  *
9219  * This function is called by the slow ring event handler
9220  * function when there is an ASYNC event iocb in the ring.
9221  * This function is called with no lock held.
9222  * Currently this function handles only temperature related
9223  * ASYNC events. The function decodes the temperature sensor
9224  * event message and posts events for the management applications.
9225  **/
9226 static void
9227 lpfc_sli_async_event_handler(struct lpfc_hba * phba,
9228 	struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
9229 {
9230 	IOCB_t *icmd;
9231 	uint16_t evt_code;
9232 	struct temp_event temp_event_data;
9233 	struct Scsi_Host *shost;
9234 	uint32_t *iocb_w;
9235 
9236 	icmd = &iocbq->iocb;
9237 	evt_code = icmd->un.asyncstat.evt_code;
9238 
9239 	switch (evt_code) {
9240 	case ASYNC_TEMP_WARN:
9241 	case ASYNC_TEMP_SAFE:
9242 		temp_event_data.data = (uint32_t) icmd->ulpContext;
9243 		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
9244 		if (evt_code == ASYNC_TEMP_WARN) {
9245 			temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
9246 			lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
9247 				"0347 Adapter is very hot, please take "
9248 				"corrective action. temperature : %d Celsius\n",
9249 				(uint32_t) icmd->ulpContext);
9250 		} else {
9251 			temp_event_data.event_code = LPFC_NORMAL_TEMP;
9252 			lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
9253 				"0340 Adapter temperature is OK now. "
9254 				"temperature : %d Celsius\n",
9255 				(uint32_t) icmd->ulpContext);
9256 		}
9257 
9258 		/* Send temperature change event to applications */
9259 		shost = lpfc_shost_from_vport(phba->pport);
9260 		fc_host_post_vendor_event(shost, fc_get_event_number(),
9261 			sizeof(temp_event_data), (char *) &temp_event_data,
9262 			LPFC_NL_VENDOR_ID);
9263 		break;
9264 	case ASYNC_STATUS_CN:
9265 		lpfc_sli_abts_err_handler(phba, iocbq);
9266 		break;
9267 	default:
9268 		iocb_w = (uint32_t *) icmd;
9269 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9270 			"0346 Ring %d handler: unexpected ASYNC_STATUS"
9271 			" evt_code 0x%x\n"
9272 			"W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
9273 			"W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
9274 			"W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
9275 			"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
9276 			pring->ringno, icmd->un.asyncstat.evt_code,
9277 			iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
9278 			iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
9279 			iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
9280 			iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
9281 
9282 		break;
9283 	}
9284 }
9285 
9286 
9287 /**
9288  * lpfc_sli_setup - SLI ring setup function
9289  * @phba: Pointer to HBA context object.
9290  *
9291  * lpfc_sli_setup sets up rings of the SLI interface with
9292  * number of iocbs per ring and iotags. This function is
9293  * called while the driver attaches to the HBA and before the
9294  * interrupts are enabled. So there is no need for locking.
9295  *
9296  * This function always returns 0.
9297  **/
9298 int
9299 lpfc_sli_setup(struct lpfc_hba *phba)
9300 {
9301 	int i, totiocbsize = 0;
9302 	struct lpfc_sli *psli = &phba->sli;
9303 	struct lpfc_sli_ring *pring;
9304 
9305 	psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
9306 	if (phba->sli_rev == LPFC_SLI_REV4)
9307 		psli->num_rings += phba->cfg_fcp_io_channel;
9308 	psli->sli_flag = 0;
9309 	psli->fcp_ring = LPFC_FCP_RING;
9310 	psli->next_ring = LPFC_FCP_NEXT_RING;
9311 	psli->extra_ring = LPFC_EXTRA_RING;
9312 
9313 	psli->iocbq_lookup = NULL;
9314 	psli->iocbq_lookup_len = 0;
9315 	psli->last_iotag = 0;
9316 
9317 	for (i = 0; i < psli->num_rings; i++) {
9318 		pring = &psli->ring[i];
9319 		switch (i) {
9320 		case LPFC_FCP_RING:	/* ring 0 - FCP */
9321 			/* numCiocb and numRiocb are used in config_port */
9322 			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
9323 			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
9324 			pring->sli.sli3.numCiocb +=
9325 				SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9326 			pring->sli.sli3.numRiocb +=
9327 				SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9328 			pring->sli.sli3.numCiocb +=
9329 				SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9330 			pring->sli.sli3.numRiocb +=
9331 				SLI2_IOCB_RSP_R3XTRA_ENTRIES;
9332 			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
9333 							SLI3_IOCB_CMD_SIZE :
9334 							SLI2_IOCB_CMD_SIZE;
9335 			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
9336 							SLI3_IOCB_RSP_SIZE :
9337 							SLI2_IOCB_RSP_SIZE;
9338 			pring->iotag_ctr = 0;
9339 			pring->iotag_max =
9340 			    (phba->cfg_hba_queue_depth * 2);
9341 			pring->fast_iotag = pring->iotag_max;
9342 			pring->num_mask = 0;
9343 			break;
9344 		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
9345 			/* numCiocb and numRiocb are used in config_port */
9346 			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
9347 			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
9348 			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
9349 							SLI3_IOCB_CMD_SIZE :
9350 							SLI2_IOCB_CMD_SIZE;
9351 			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
9352 							SLI3_IOCB_RSP_SIZE :
9353 							SLI2_IOCB_RSP_SIZE;
9354 			pring->iotag_max = phba->cfg_hba_queue_depth;
9355 			pring->num_mask = 0;
9356 			break;
9357 		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
9358 			/* numCiocb and numRiocb are used in config_port */
9359 			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
9360 			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
9361 			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
9362 							SLI3_IOCB_CMD_SIZE :
9363 							SLI2_IOCB_CMD_SIZE;
9364 			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
9365 							SLI3_IOCB_RSP_SIZE :
9366 							SLI2_IOCB_RSP_SIZE;
9367 			pring->fast_iotag = 0;
9368 			pring->iotag_ctr = 0;
9369 			pring->iotag_max = 4096;
9370 			pring->lpfc_sli_rcv_async_status =
9371 				lpfc_sli_async_event_handler;
9372 			pring->num_mask = LPFC_MAX_RING_MASK;
9373 			pring->prt[0].profile = 0;	/* Mask 0 */
9374 			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
9375 			pring->prt[0].type = FC_TYPE_ELS;
9376 			pring->prt[0].lpfc_sli_rcv_unsol_event =
9377 			    lpfc_els_unsol_event;
9378 			pring->prt[1].profile = 0;	/* Mask 1 */
9379 			pring->prt[1].rctl = FC_RCTL_ELS_REP;
9380 			pring->prt[1].type = FC_TYPE_ELS;
9381 			pring->prt[1].lpfc_sli_rcv_unsol_event =
9382 			    lpfc_els_unsol_event;
9383 			pring->prt[2].profile = 0;	/* Mask 2 */
9384 			/* NameServer Inquiry */
9385 			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
9386 			/* NameServer */
9387 			pring->prt[2].type = FC_TYPE_CT;
9388 			pring->prt[2].lpfc_sli_rcv_unsol_event =
9389 			    lpfc_ct_unsol_event;
9390 			pring->prt[3].profile = 0;	/* Mask 3 */
9391 			/* NameServer response */
9392 			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
9393 			/* NameServer */
9394 			pring->prt[3].type = FC_TYPE_CT;
9395 			pring->prt[3].lpfc_sli_rcv_unsol_event =
9396 			    lpfc_ct_unsol_event;
9397 			break;
9398 		}
9399 		totiocbsize += (pring->sli.sli3.numCiocb *
9400 			pring->sli.sli3.sizeCiocb) +
9401 			(pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
9402 	}
9403 	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
9404 		/* Too many cmd / rsp ring entries in SLI2 SLIM */
9405 		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
9406 		       "SLI2 SLIM Data: x%x x%lx\n",
9407 		       phba->brd_no, totiocbsize,
9408 		       (unsigned long) MAX_SLIM_IOCB_SIZE);
9409 	}
9410 	if (phba->cfg_multi_ring_support == 2)
9411 		lpfc_extra_ring_setup(phba);
9412 
9413 	return 0;
9414 }
9415 
9416 /**
9417  * lpfc_sli_queue_setup - Queue initialization function
9418  * @phba: Pointer to HBA context object.
9419  *
9420  * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each
9421  * ring. This function also initializes ring indices of each ring.
9422  * This function is called during the initialization of the SLI
9423  * interface of an HBA.
9424  * This function is called with no lock held and always returns
9425  * 1.
9426  **/
9427 int
9428 lpfc_sli_queue_setup(struct lpfc_hba *phba)
9429 {
9430 	struct lpfc_sli *psli;
9431 	struct lpfc_sli_ring *pring;
9432 	int i;
9433 
9434 	psli = &phba->sli;
9435 	spin_lock_irq(&phba->hbalock);
9436 	INIT_LIST_HEAD(&psli->mboxq);
9437 	INIT_LIST_HEAD(&psli->mboxq_cmpl);
9438 	/* Initialize list headers for txq and txcmplq as doubly linked lists */
9439 	for (i = 0; i < psli->num_rings; i++) {
9440 		pring = &psli->ring[i];
9441 		pring->ringno = i;
9442 		pring->sli.sli3.next_cmdidx  = 0;
9443 		pring->sli.sli3.local_getidx = 0;
9444 		pring->sli.sli3.cmdidx = 0;
9445 		pring->flag = 0;
9446 		INIT_LIST_HEAD(&pring->txq);
9447 		INIT_LIST_HEAD(&pring->txcmplq);
9448 		INIT_LIST_HEAD(&pring->iocb_continueq);
9449 		INIT_LIST_HEAD(&pring->iocb_continue_saveq);
9450 		INIT_LIST_HEAD(&pring->postbufq);
9451 		spin_lock_init(&pring->ring_lock);
9452 	}
9453 	spin_unlock_irq(&phba->hbalock);
9454 	return 1;
9455 }
9456 
9457 /**
9458  * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
9459  * @phba: Pointer to HBA context object.
9460  *
9461  * This routine flushes the mailbox command subsystem. It will unconditionally
9462  * flush all the mailbox commands in the three possible stages in the mailbox
9463  * command sub-system: pending mailbox command queue; the outstanding mailbox
9464  * command; and completed mailbox command queue. It is the caller's responsibility
9465  * to make sure that the driver is in the proper state to flush the mailbox
9466  * command sub-system. Namely, the posting of mailbox commands into the
9467  * pending mailbox command queue from the various clients must be stopped;
9468  * either the HBA is in a state where it will never work on the outstanding
9469  * mailbox command (such as in EEH or ERATT conditions) or the outstanding
9470  * mailbox command has been completed.
9471  **/
9472 static void
9473 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
9474 {
9475 	LIST_HEAD(completions);
9476 	struct lpfc_sli *psli = &phba->sli;
9477 	LPFC_MBOXQ_t *pmb;
9478 	unsigned long iflag;
9479 
9480 	/* Flush all the mailbox commands in the mbox system */
9481 	spin_lock_irqsave(&phba->hbalock, iflag);
9482 	/* The pending mailbox command queue */
9483 	list_splice_init(&phba->sli.mboxq, &completions);
9484 	/* The outstanding active mailbox command */
9485 	if (psli->mbox_active) {
9486 		list_add_tail(&psli->mbox_active->list, &completions);
9487 		psli->mbox_active = NULL;
9488 		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9489 	}
9490 	/* The completed mailbox command queue */
9491 	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
9492 	spin_unlock_irqrestore(&phba->hbalock, iflag);
9493 
9494 	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
9495 	while (!list_empty(&completions)) {
9496 		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
9497 		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
9498 		if (pmb->mbox_cmpl)
9499 			pmb->mbox_cmpl(phba, pmb);
9500 	}
9501 }
9502 
9503 /**
9504  * lpfc_sli_host_down - Vport cleanup function
9505  * @vport: Pointer to virtual port object.
9506  *
9507  * lpfc_sli_host_down is called to clean up the resources
9508  * associated with a vport before destroying virtual
9509  * port data structures.
9510  * This function does following operations:
9511  * - Free discovery resources associated with this virtual
9512  *   port.
9513  * - Free iocbs associated with this virtual port in
9514  *   the txq.
9515  * - Send abort for all iocb commands associated with this
9516  *   vport in txcmplq.
9517  *
9518  * This function is called with no lock held and always returns 1.
9519  **/
9520 int
9521 lpfc_sli_host_down(struct lpfc_vport *vport)
9522 {
9523 	LIST_HEAD(completions);
9524 	struct lpfc_hba *phba = vport->phba;
9525 	struct lpfc_sli *psli = &phba->sli;
9526 	struct lpfc_sli_ring *pring;
9527 	struct lpfc_iocbq *iocb, *next_iocb;
9528 	int i;
9529 	unsigned long flags = 0;
9530 	uint16_t prev_pring_flag;
9531 
9532 	lpfc_cleanup_discovery_resources(vport);
9533 
9534 	spin_lock_irqsave(&phba->hbalock, flags);
9535 	for (i = 0; i < psli->num_rings; i++) {
9536 		pring = &psli->ring[i];
9537 		prev_pring_flag = pring->flag;
9538 		/* Only slow rings */
9539 		if (pring->ringno == LPFC_ELS_RING) {
9540 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
9541 			/* Set the lpfc data pending flag */
9542 			set_bit(LPFC_DATA_READY, &phba->data_flags);
9543 		}
9544 		/*
9545 		 * Error everything on the txq since these iocbs have not been
9546 		 * given to the FW yet.
9547 		 */
9548 		list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
9549 			if (iocb->vport != vport)
9550 				continue;
9551 			list_move_tail(&iocb->list, &completions);
9552 		}
9553 
9554 		/* Next issue ABTS for everything on the txcmplq */
9555 		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
9556 									list) {
9557 			if (iocb->vport != vport)
9558 				continue;
9559 			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
9560 		}
9561 
9562 		pring->flag = prev_pring_flag;
9563 	}
9564 
9565 	spin_unlock_irqrestore(&phba->hbalock, flags);
9566 
9567 	/* Cancel all the IOCBs from the completions list */
9568 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9569 			      IOERR_SLI_DOWN);
9570 	return 1;
9571 }
9572 
9573 /**
9574  * lpfc_sli_hba_down - Resource cleanup function for the HBA
9575  * @phba: Pointer to HBA context object.
9576  *
9577  * This function cleans up all iocb, buffers, mailbox commands
9578  * while shutting down the HBA. This function is called with no
9579  * lock held and always returns 1.
9580  * This function does the following to cleanup driver resources:
9581  * - Free discovery resources for each virtual port
9582  * - Cleanup any pending fabric iocbs
9583  * - Iterate through the iocb txq and free each entry
9584  *   in the list.
9585  * - Free up any buffer posted to the HBA
9586  * - Free mailbox commands in the mailbox queue.
9587  **/
9588 int
9589 lpfc_sli_hba_down(struct lpfc_hba *phba)
9590 {
9591 	LIST_HEAD(completions);
9592 	struct lpfc_sli *psli = &phba->sli;
9593 	struct lpfc_sli_ring *pring;
9594 	struct lpfc_dmabuf *buf_ptr;
9595 	unsigned long flags = 0;
9596 	int i;
9597 
9598 	/* Shutdown the mailbox command sub-system */
9599 	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
9600 
9601 	lpfc_hba_down_prep(phba);
9602 
9603 	lpfc_fabric_abort_hba(phba);
9604 
9605 	spin_lock_irqsave(&phba->hbalock, flags);
9606 	for (i = 0; i < psli->num_rings; i++) {
9607 		pring = &psli->ring[i];
9608 		/* Only slow rings */
9609 		if (pring->ringno == LPFC_ELS_RING) {
9610 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
9611 			/* Set the lpfc data pending flag */
9612 			set_bit(LPFC_DATA_READY, &phba->data_flags);
9613 		}
9614 
9615 		/*
9616 		 * Error everything on the txq since these iocbs have not been
9617 		 * given to the FW yet.
9618 		 */
9619 		list_splice_init(&pring->txq, &completions);
9620 	}
9621 	spin_unlock_irqrestore(&phba->hbalock, flags);
9622 
9623 	/* Cancel all the IOCBs from the completions list */
9624 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9625 			      IOERR_SLI_DOWN);
9626 
9627 	spin_lock_irqsave(&phba->hbalock, flags);
9628 	list_splice_init(&phba->elsbuf, &completions);
9629 	phba->elsbuf_cnt = 0;
9630 	phba->elsbuf_prev_cnt = 0;
9631 	spin_unlock_irqrestore(&phba->hbalock, flags);
9632 
9633 	while (!list_empty(&completions)) {
9634 		list_remove_head(&completions, buf_ptr,
9635 			struct lpfc_dmabuf, list);
9636 		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
9637 		kfree(buf_ptr);
9638 	}
9639 
9640 	/* Return any active mbox cmds */
9641 	del_timer_sync(&psli->mbox_tmo);
9642 
9643 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
9644 	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
9645 	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
9646 
9647 	return 1;
9648 }
9649 
9650 /**
9651  * lpfc_sli_pcimem_bcopy - SLI memory copy function
9652  * @srcp: Source memory pointer.
9653  * @destp: Destination memory pointer.
9654  * @cnt: Number of bytes to copy (must be a multiple of sizeof(uint32_t)).
9655  *
9656  * This function is used for copying data between driver memory
9657  * and the SLI memory. This function also changes the endianness
9658  * of each word if native endianness is different from SLI
9659  * endianness. This function can be called with or without
9660  * lock.
9661  **/
9662 void
9663 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
9664 {
9665 	uint32_t *src = srcp;
9666 	uint32_t *dest = destp;
9667 	uint32_t ldata;
9668 	int i;
9669 
9670 	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
9671 		ldata = *src;
9672 		ldata = le32_to_cpu(ldata);
9673 		*dest = ldata;
9674 		src++;
9675 		dest++;
9676 	}
9677 }
9678 
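/*
 * Example (illustrative sketch, not part of the driver): pulling a
 * little-endian mailbox image out of SLI memory into a CPU-endian local
 * copy with lpfc_sli_pcimem_bcopy().  The byte count must be a multiple
 * of sizeof(uint32_t); "shadow" is a hypothetical caller-owned buffer.
 */
static void lpfc_example_copy_mbox(MAILBOX_t *sli_mbox, MAILBOX_t *shadow)
{
	/* Copy the whole mailbox, swapping each word to CPU order. */
	lpfc_sli_pcimem_bcopy(sli_mbox, shadow, sizeof(MAILBOX_t));
}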
9679 
9680 /**
9681  * lpfc_sli_bemem_bcopy - SLI memory copy function
9682  * @srcp: Source memory pointer.
9683  * @destp: Destination memory pointer.
9684  * @cnt: Number of bytes to copy (must be a multiple of sizeof(uint32_t)).
9685  *
9686  * This function is used for copying data from a data structure
9687  * with big-endian representation to local CPU endianness.
9688  * This function can be called with or without lock.
9689  **/
9690 void
9691 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
9692 {
9693 	uint32_t *src = srcp;
9694 	uint32_t *dest = destp;
9695 	uint32_t ldata;
9696 	int i;
9697 
9698 	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
9699 		ldata = *src;
9700 		ldata = be32_to_cpu(ldata);
9701 		*dest = ldata;
9702 		src++;
9703 		dest++;
9704 	}
9705 }
9706 
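/*
 * Example (illustrative sketch, not part of the driver): converting a
 * big-endian word array supplied by the port into CPU order with
 * lpfc_sli_bemem_bcopy().  The function and parameter names below are
 * hypothetical; byte_cnt must be a multiple of sizeof(uint32_t).
 */
static void lpfc_example_be_to_cpu(uint32_t *be_src, uint32_t *cpu_dst,
				   uint32_t byte_cnt)
{
	/* Swap each big-endian word into CPU order while copying. */
	lpfc_sli_bemem_bcopy(be_src, cpu_dst, byte_cnt);
}
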
9707 /**
9708  * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
9709  * @phba: Pointer to HBA context object.
9710  * @pring: Pointer to driver SLI ring object.
9711  * @mp: Pointer to driver buffer object.
9712  *
9713  * This function is called with no lock held.
9714  * It always returns zero after adding the buffer to the postbufq
9715  * buffer list.
9716  **/
9717 int
9718 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9719 			 struct lpfc_dmabuf *mp)
9720 {
9721 	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
9722 	   later */
9723 	spin_lock_irq(&phba->hbalock);
9724 	list_add_tail(&mp->list, &pring->postbufq);
9725 	pring->postbufq_cnt++;
9726 	spin_unlock_irq(&phba->hbalock);
9727 	return 0;
9728 }
9729 
9730 /**
9731  * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
9732  * @phba: Pointer to HBA context object.
9733  *
9734  * When HBQ is enabled, buffers are searched based on tags. This function
9735  * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
9736  * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
9737  * does not conflict with tags of buffer posted for unsolicited events.
9738  * The function returns the allocated tag. The function is called with
9739  * no locks held.
9740  **/
9741 uint32_t
9742 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
9743 {
9744 	spin_lock_irq(&phba->hbalock);
9745 	phba->buffer_tag_count++;
9746 	/*
9747 	 * Always set the QUE_BUFTAG_BIT to distinguish this tag from
9748 	 * a tag assigned by the HBQ.
9749 	 */
9750 	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
9751 	spin_unlock_irq(&phba->hbalock);
9752 	return phba->buffer_tag_count;
9753 }
9754 
9755 /**
9756  * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
9757  * @phba: Pointer to HBA context object.
9758  * @pring: Pointer to driver SLI ring object.
9759  * @tag: Buffer tag.
9760  *
9761  * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
9762  * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
9763  * iocb is posted to the response ring with the tag of the buffer.
9764  * This function searches the pring->postbufq list using the tag
9765  * to find buffer associated with CMD_IOCB_RET_XRI64_CX
9766  * iocb. If the buffer is found then lpfc_dmabuf object of the
9767  * buffer is returned to the caller else NULL is returned.
9768  * This function is called with no lock held.
9769  **/
9770 struct lpfc_dmabuf *
9771 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9772 			uint32_t tag)
9773 {
9774 	struct lpfc_dmabuf *mp, *next_mp;
9775 	struct list_head *slp = &pring->postbufq;
9776 
9777 	/* Search postbufq, from the beginning, looking for a match on tag */
9778 	spin_lock_irq(&phba->hbalock);
9779 	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
9780 		if (mp->buffer_tag == tag) {
9781 			list_del_init(&mp->list);
9782 			pring->postbufq_cnt--;
9783 			spin_unlock_irq(&phba->hbalock);
9784 			return mp;
9785 		}
9786 	}
9787 
9788 	spin_unlock_irq(&phba->hbalock);
9789 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9790 			"0402 Cannot find virtual addr for buffer tag on "
9791 			"ring %d Data x%lx x%p x%p x%x\n",
9792 			pring->ringno, (unsigned long) tag,
9793 			slp->next, slp->prev, pring->postbufq_cnt);
9794 
9795 	return NULL;
9796 }
9797 
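/*
 * Example (illustrative sketch, not part of the driver): tagging a
 * buffer for a CMD_QUE_XRI64_CX posting and recovering it once the
 * CMD_IOCB_RET_XRI64_CX response arrives with the same tag.  "mp" is
 * assumed to be a fully initialized lpfc_dmabuf supplied by the caller.
 */
static struct lpfc_dmabuf *
lpfc_example_post_and_find_tagged(struct lpfc_hba *phba,
				  struct lpfc_sli_ring *pring,
				  struct lpfc_dmabuf *mp)
{
	/* Allocate a tag that cannot collide with HBQ-assigned tags. */
	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
	lpfc_sli_ringpostbuf_put(phba, pring, mp);

	/* ...later, when the response iocb carries mp->buffer_tag... */
	return lpfc_sli_ring_taggedbuf_get(phba, pring, mp->buffer_tag);
}
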
9798 /**
9799  * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
9800  * @phba: Pointer to HBA context object.
9801  * @pring: Pointer to driver SLI ring object.
9802  * @phys: DMA address of the buffer.
9803  *
9804  * This function searches the buffer list using the dma_address
9805  * of unsolicited event to find the driver's lpfc_dmabuf object
9806  * corresponding to the dma_address. The function returns the
9807  * lpfc_dmabuf object if a buffer is found else it returns NULL.
9808  * This function is called by the ct and els unsolicited event
9809  * handlers to get the buffer associated with the unsolicited
9810  * event.
9811  *
9812  * This function is called with no lock held.
9813  **/
9814 struct lpfc_dmabuf *
9815 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9816 			 dma_addr_t phys)
9817 {
9818 	struct lpfc_dmabuf *mp, *next_mp;
9819 	struct list_head *slp = &pring->postbufq;
9820 
9821 	/* Search postbufq, from the beginning, looking for a match on phys */
9822 	spin_lock_irq(&phba->hbalock);
9823 	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
9824 		if (mp->phys == phys) {
9825 			list_del_init(&mp->list);
9826 			pring->postbufq_cnt--;
9827 			spin_unlock_irq(&phba->hbalock);
9828 			return mp;
9829 		}
9830 	}
9831 
9832 	spin_unlock_irq(&phba->hbalock);
9833 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9834 			"0410 Cannot find virtual addr for mapped buf on "
9835 			"ring %d Data x%llx x%p x%p x%x\n",
9836 			pring->ringno, (unsigned long long)phys,
9837 			slp->next, slp->prev, pring->postbufq_cnt);
9838 	return NULL;
9839 }
9840 
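/*
 * Example (illustrative sketch, not part of the driver): how an
 * unsolicited-event handler might recover the driver buffer the HBA
 * DMAed into, given only the DMA address reported in the iocb.
 */
static struct lpfc_dmabuf *
lpfc_example_find_unsol_buf(struct lpfc_hba *phba,
			    struct lpfc_sli_ring *pring, dma_addr_t phys)
{
	/* Returns NULL (and logs message 0410) if no posted buffer matches. */
	return lpfc_sli_ringpostbuf_get(phba, pring, phys);
}
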
9841 /**
9842  * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
9843  * @phba: Pointer to HBA context object.
9844  * @cmdiocb: Pointer to driver command iocb object.
9845  * @rspiocb: Pointer to driver response iocb object.
9846  *
9847  * This function is the completion handler for the abort iocbs for
9848  * ELS commands. This function is called from the ELS ring event
9849  * handler with no lock held. This function frees memory resources
9850  * associated with the abort iocb.
9851  **/
9852 static void
9853 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9854 			struct lpfc_iocbq *rspiocb)
9855 {
9856 	IOCB_t *irsp = &rspiocb->iocb;
9857 	uint16_t abort_iotag, abort_context;
9858 	struct lpfc_iocbq *abort_iocb = NULL;
9859 
9860 	if (irsp->ulpStatus) {
9861 
9862 		/*
9863 		 * Assume that the port already completed and returned, or
9864 		 * will return the iocb. Just log the message.
9865 		 */
9866 		abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
9867 		abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
9868 
9869 		spin_lock_irq(&phba->hbalock);
9870 		if (phba->sli_rev < LPFC_SLI_REV4) {
9871 			if (abort_iotag != 0 &&
9872 				abort_iotag <= phba->sli.last_iotag)
9873 				abort_iocb =
9874 					phba->sli.iocbq_lookup[abort_iotag];
9875 		} else
9876 			/* For sli4 the abort_tag is the XRI,
9877 			 * so the abort routine puts the iotag of the iocb
9878 			 * being aborted in the context field of the abort
9879 			 * IOCB.
9880 			 */
9881 			abort_iocb = phba->sli.iocbq_lookup[abort_context];
9882 
9883 		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
9884 				"0327 Cannot abort els iocb %p "
9885 				"with tag %x context %x, abort status %x, "
9886 				"abort code %x\n",
9887 				abort_iocb, abort_iotag, abort_context,
9888 				irsp->ulpStatus, irsp->un.ulpWord[4]);
9889 
9890 		spin_unlock_irq(&phba->hbalock);
9891 	}
9892 	lpfc_sli_release_iocbq(phba, cmdiocb);
9893 	return;
9894 }
9895 
9896 /**
9897  * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
9898  * @phba: Pointer to HBA context object.
9899  * @cmdiocb: Pointer to driver command iocb object.
9900  * @rspiocb: Pointer to driver response iocb object.
9901  *
9902  * The function is called from SLI ring event handler with no
9903  * lock held. This function is the completion handler for ELS commands
9904  * which are aborted. The function frees memory resources used for
9905  * the aborted ELS commands.
9906  **/
9907 static void
9908 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9909 		     struct lpfc_iocbq *rspiocb)
9910 {
9911 	IOCB_t *irsp = &rspiocb->iocb;
9912 
9913 	/* ELS cmd tag <ulpIoTag> completes */
9914 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
9915 			"0139 Ignoring ELS cmd tag x%x completion Data: "
9916 			"x%x x%x x%x\n",
9917 			irsp->ulpIoTag, irsp->ulpStatus,
9918 			irsp->un.ulpWord[4], irsp->ulpTimeout);
9919 	if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
9920 		lpfc_ct_free_iocb(phba, cmdiocb);
9921 	else
9922 		lpfc_els_free_iocb(phba, cmdiocb);
9923 	return;
9924 }
9925 
9926 /**
9927  * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
9928  * @phba: Pointer to HBA context object.
9929  * @pring: Pointer to driver SLI ring object.
9930  * @cmdiocb: Pointer to driver command iocb object.
9931  *
9932  * This function issues an abort iocb for the provided command iocb down to
9933  * the port. Unless the outstanding command iocb is itself an abort request,
9934  * this function issues the abort unconditionally. This function is
9935  * called with hbalock held. The function returns 0 when it fails due to
9936  * memory allocation failure or when the command iocb is an abort request.
9937  **/
9938 static int
9939 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9940 			   struct lpfc_iocbq *cmdiocb)
9941 {
9942 	struct lpfc_vport *vport = cmdiocb->vport;
9943 	struct lpfc_iocbq *abtsiocbp;
9944 	IOCB_t *icmd = NULL;
9945 	IOCB_t *iabt = NULL;
9946 	int ring_number;
9947 	int retval;
9948 	unsigned long iflags;
9949 
9950 	lockdep_assert_held(&phba->hbalock);
9951 
9952 	/*
9953 	 * There are certain command types we don't want to abort.  And we
9954 	 * don't want to abort commands that are already in the process of
9955 	 * being aborted.
9956 	 */
9957 	icmd = &cmdiocb->iocb;
9958 	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
9959 	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
9960 	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
9961 		return 0;
9962 
9963 	/* issue ABTS for this IOCB based on iotag */
9964 	abtsiocbp = __lpfc_sli_get_iocbq(phba);
9965 	if (abtsiocbp == NULL)
9966 		return 0;
9967 
9968 	/* This signals the response to set the correct status
9969 	 * before calling the completion handler
9970 	 */
9971 	cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
9972 
9973 	iabt = &abtsiocbp->iocb;
9974 	iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
9975 	iabt->un.acxri.abortContextTag = icmd->ulpContext;
9976 	if (phba->sli_rev == LPFC_SLI_REV4) {
9977 		iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
9978 		iabt->un.acxri.abortContextTag = cmdiocb->iotag;
9979 	}
9980 	else
9981 		iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
9982 	iabt->ulpLe = 1;
9983 	iabt->ulpClass = icmd->ulpClass;
9984 
9985 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
9986 	abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
9987 	if (cmdiocb->iocb_flag & LPFC_IO_FCP)
9988 		abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
9989 	if (cmdiocb->iocb_flag & LPFC_IO_FOF)
9990 		abtsiocbp->iocb_flag |= LPFC_IO_FOF;
9991 
9992 	if (phba->link_state >= LPFC_LINK_UP)
9993 		iabt->ulpCommand = CMD_ABORT_XRI_CN;
9994 	else
9995 		iabt->ulpCommand = CMD_CLOSE_XRI_CN;
9996 
9997 	abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
9998 
9999 	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
10000 			 "0339 Abort xri x%x, original iotag x%x, "
10001 			 "abort cmd iotag x%x\n",
10002 			 iabt->un.acxri.abortIoTag,
10003 			 iabt->un.acxri.abortContextTag,
10004 			 abtsiocbp->iotag);
10005 
10006 	if (phba->sli_rev == LPFC_SLI_REV4) {
10007 		ring_number =
10008 			lpfc_sli_calc_ring(phba, pring->ringno, abtsiocbp);
10009 		if (unlikely(ring_number == LPFC_HBA_ERROR))
10010 			return 0;
10011 		pring = &phba->sli.ring[ring_number];
10012 		/* Note: both hbalock and ring_lock need to be held here */
10013 		spin_lock_irqsave(&pring->ring_lock, iflags);
10014 		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
10015 			abtsiocbp, 0);
10016 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
10017 	} else {
10018 		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
10019 			abtsiocbp, 0);
10020 	}
10021 
10022 	if (retval)
10023 		__lpfc_sli_release_iocbq(phba, abtsiocbp);
10024 
10025 	/*
10026 	 * Caller to this routine should check for IOCB_ERROR
10027 	 * and handle it properly.  This routine no longer removes
10028 	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
10029 	 */
10030 	return retval;
10031 }
10032 
10033 /**
10034  * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
10035  * @phba: Pointer to HBA context object.
10036  * @pring: Pointer to driver SLI ring object.
10037  * @cmdiocb: Pointer to driver command iocb object.
10038  *
10039  * This function issues an abort iocb for the provided command iocb. In case
10040  * of unloading, the abort iocb will not be issued to commands on the ELS
10041  * ring. Instead, the callback function shall be changed to those commands
10042  * so that nothing happens when them finishes. This function is called with
10043  * hbalock held. The function returns 0 when the command iocb is an abort
10044  * request.
10045  **/
10046 int
10047 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10048 			   struct lpfc_iocbq *cmdiocb)
10049 {
10050 	struct lpfc_vport *vport = cmdiocb->vport;
10051 	int retval = IOCB_ERROR;
10052 	IOCB_t *icmd = NULL;
10053 
10054 	lockdep_assert_held(&phba->hbalock);
10055 
10056 	/*
10057 	 * There are certain command types we don't want to abort.  And we
10058 	 * don't want to abort commands that are already in the process of
10059 	 * being aborted.
10060 	 */
10061 	icmd = &cmdiocb->iocb;
10062 	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
10063 	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
10064 	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10065 		return 0;
10066 
10067 	/*
10068 	 * If we're unloading, don't abort iocb on the ELS ring, but change
10069 	 * the callback so that nothing happens when it finishes.
10070 	 */
10071 	if ((vport->load_flag & FC_UNLOADING) &&
10072 	    (pring->ringno == LPFC_ELS_RING)) {
10073 		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
10074 			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
10075 		else
10076 			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
10077 		goto abort_iotag_exit;
10078 	}
10079 
10080 	/* Now, we try to issue the abort to the cmdiocb out */
10081 	retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
10082 
10083 abort_iotag_exit:
10084 	/*
10085 	 * Caller to this routine should check for IOCB_ERROR
10086 	 * and handle it properly.  This routine no longer removes
10087 	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
10088 	 */
10089 	return retval;
10090 }
10091 
10092 /**
10093  * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
10094  * @phba: pointer to lpfc HBA data structure.
10095  *
10096  * This routine will abort all pending and outstanding iocbs to an HBA.
10097  **/
10098 void
10099 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
10100 {
10101 	struct lpfc_sli *psli = &phba->sli;
10102 	struct lpfc_sli_ring *pring;
10103 	int i;
10104 
10105 	for (i = 0; i < psli->num_rings; i++) {
10106 		pring = &psli->ring[i];
10107 		lpfc_sli_abort_iocb_ring(phba, pring);
10108 	}
10109 }
10110 
10111 /**
10112  * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
10113  * @iocbq: Pointer to driver iocb object.
10114  * @vport: Pointer to driver virtual port object.
10115  * @tgt_id: SCSI ID of the target.
10116  * @lun_id: LUN ID of the scsi device.
10117  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
10118  *
10119  * This function acts as an iocb filter for functions which abort or count
10120  * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
10121  * 0 if the filtering criteria is met for the given iocb and will return
10122  * 0 if the filtering criteria are met for the given iocb and will return
10123  * 1 if the filtering criteria are not met.
10124  * given iocb is for the SCSI device specified by vport, tgt_id and
10125  * lun_id parameter.
10126  * If ctx_cmd == LPFC_CTX_TGT,  the function returns 0 only if the
10127  * given iocb is for the SCSI target specified by vport and tgt_id
10128  * parameters.
10129  * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
10130  * given iocb is for the SCSI host associated with the given vport.
10131  * This function is called with no locks held.
10132  **/
10133 static int
10134 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
10135 			   uint16_t tgt_id, uint64_t lun_id,
10136 			   lpfc_ctx_cmd ctx_cmd)
10137 {
10138 	struct lpfc_scsi_buf *lpfc_cmd;
10139 	int rc = 1;
10140 
10141 	if (!(iocbq->iocb_flag &  LPFC_IO_FCP))
10142 		return rc;
10143 
10144 	if (iocbq->vport != vport)
10145 		return rc;
10146 
10147 	lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
10148 
10149 	if (lpfc_cmd->pCmd == NULL)
10150 		return rc;
10151 
10152 	switch (ctx_cmd) {
10153 	case LPFC_CTX_LUN:
10154 		if ((lpfc_cmd->rdata->pnode) &&
10155 		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
10156 		    (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
10157 			rc = 0;
10158 		break;
10159 	case LPFC_CTX_TGT:
10160 		if ((lpfc_cmd->rdata->pnode) &&
10161 		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
10162 			rc = 0;
10163 		break;
10164 	case LPFC_CTX_HOST:
10165 		rc = 0;
10166 		break;
10167 	default:
10168 		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
10169 			__func__, ctx_cmd);
10170 		break;
10171 	}
10172 
10173 	return rc;
10174 }
10175 
10176 /**
10177  * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
10178  * @vport: Pointer to virtual port.
10179  * @tgt_id: SCSI ID of the target.
10180  * @lun_id: LUN ID of the scsi device.
10181  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
10182  *
10183  * This function returns number of FCP commands pending for the vport.
10184  * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
10185  * commands pending on the vport associated with SCSI device specified
10186  * by tgt_id and lun_id parameters.
10187  * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
10188  * commands pending on the vport associated with SCSI target specified
10189  * by tgt_id parameter.
10190  * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
10191  * commands pending on the vport.
10192  * This function returns the number of iocbs which satisfy the filter.
10193  * This function is called without any lock held.
10194  **/
10195 int
10196 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
10197 		  lpfc_ctx_cmd ctx_cmd)
10198 {
10199 	struct lpfc_hba *phba = vport->phba;
10200 	struct lpfc_iocbq *iocbq;
10201 	int sum, i;
10202 
10203 	spin_lock_irq(&phba->hbalock);
10204 	for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
10205 		iocbq = phba->sli.iocbq_lookup[i];
10206 
10207 		if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
10208 						ctx_cmd) == 0)
10209 			sum++;
10210 	}
10211 	spin_unlock_irq(&phba->hbalock);
10212 
10213 	return sum;
10214 }
10215 
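/*
 * Example (illustrative sketch, not part of the driver): counting every
 * FCP command still pending for a vport.  With LPFC_CTX_HOST the tgt_id
 * and lun_id arguments are not used for filtering, so zeros are passed
 * as placeholders.
 */
static int lpfc_example_count_vport_fcp(struct lpfc_vport *vport)
{
	return lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
}
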
10216 /**
10217  * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
10218  * @phba: Pointer to HBA context object
10219  * @cmdiocb: Pointer to command iocb object.
10220  * @rspiocb: Pointer to response iocb object.
10221  *
10222  * This function is called when an aborted FCP iocb completes. This
10223  * function is called by the ring event handler with no lock held.
10224  * This function frees the iocb.
10225  **/
10226 void
10227 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
10228 			struct lpfc_iocbq *rspiocb)
10229 {
10230 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10231 			"3096 ABORT_XRI_CN completing on rpi x%x "
10232 			"original iotag x%x, abort cmd iotag x%x "
10233 			"status 0x%x, reason 0x%x\n",
10234 			cmdiocb->iocb.un.acxri.abortContextTag,
10235 			cmdiocb->iocb.un.acxri.abortIoTag,
10236 			cmdiocb->iotag, rspiocb->iocb.ulpStatus,
10237 			rspiocb->iocb.un.ulpWord[4]);
10238 	lpfc_sli_release_iocbq(phba, cmdiocb);
10239 	return;
10240 }
10241 
10242 /**
10243  * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
10244  * @vport: Pointer to virtual port.
10245  * @pring: Pointer to driver SLI ring object.
10246  * @tgt_id: SCSI ID of the target.
10247  * @lun_id: LUN ID of the scsi device.
10248  * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
10249  *
10250  * This function sends an abort command for every SCSI command
10251  * associated with the given virtual port pending on the ring
10252  * filtered by lpfc_sli_validate_fcp_iocb function.
10253  * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
10254  * FCP iocbs associated with lun specified by tgt_id and lun_id
10255  * parameters
10256  * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
10257  * FCP iocbs associated with SCSI target specified by tgt_id parameter.
10258  * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
10259  * FCP iocbs associated with virtual port.
10260  * This function returns number of iocbs it failed to abort.
10261  * This function is called with no locks held.
10262  **/
10263 int
10264 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
10265 		    uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
10266 {
10267 	struct lpfc_hba *phba = vport->phba;
10268 	struct lpfc_iocbq *iocbq;
10269 	struct lpfc_iocbq *abtsiocb;
10270 	IOCB_t *cmd = NULL;
10271 	int errcnt = 0, ret_val = 0;
10272 	int i;
10273 
10274 	for (i = 1; i <= phba->sli.last_iotag; i++) {
10275 		iocbq = phba->sli.iocbq_lookup[i];
10276 
10277 		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
10278 					       abort_cmd) != 0)
10279 			continue;
10280 
10281 		/*
10282 		 * If the iocbq is already being aborted, don't take a second
10283 		 * action, but do count it.
10284 		 */
10285 		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
10286 			continue;
10287 
10288 		/* issue ABTS for this IOCB based on iotag */
10289 		abtsiocb = lpfc_sli_get_iocbq(phba);
10290 		if (abtsiocb == NULL) {
10291 			errcnt++;
10292 			continue;
10293 		}
10294 
10295 		/* indicate the IO is being aborted by the driver. */
10296 		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
10297 
10298 		cmd = &iocbq->iocb;
10299 		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
10300 		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
10301 		if (phba->sli_rev == LPFC_SLI_REV4)
10302 			abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
10303 		else
10304 			abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
10305 		abtsiocb->iocb.ulpLe = 1;
10306 		abtsiocb->iocb.ulpClass = cmd->ulpClass;
10307 		abtsiocb->vport = vport;
10308 
10309 		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
10310 		abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
10311 		if (iocbq->iocb_flag & LPFC_IO_FCP)
10312 			abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
10313 		if (iocbq->iocb_flag & LPFC_IO_FOF)
10314 			abtsiocb->iocb_flag |= LPFC_IO_FOF;
10315 
10316 		if (lpfc_is_link_up(phba))
10317 			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
10318 		else
10319 			abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
10320 
10321 		/* Setup callback routine and issue the command. */
10322 		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
10323 		ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
10324 					      abtsiocb, 0);
10325 		if (ret_val == IOCB_ERROR) {
10326 			lpfc_sli_release_iocbq(phba, abtsiocb);
10327 			errcnt++;
10328 			continue;
10329 		}
10330 	}
10331 
10332 	return errcnt;
10333 }
10334 
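/*
 * Example (illustrative sketch, not part of the driver): requesting ABTS
 * for every FCP command still outstanding to one SCSI target.  The
 * lun_id argument is ignored for LPFC_CTX_TGT, so zero is passed; "pring"
 * is assumed to be the ring the caller normally issues FCP commands on.
 */
static int lpfc_example_abort_target(struct lpfc_vport *vport,
				     struct lpfc_sli_ring *pring,
				     uint16_t tgt_id)
{
	/* Returns the number of iocbs the driver failed to abort. */
	return lpfc_sli_abort_iocb(vport, pring, tgt_id, 0, LPFC_CTX_TGT);
}
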
10335 /**
10336  * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
10337  * @vport: Pointer to virtual port.
10338  * @pring: Pointer to driver SLI ring object.
10339  * @tgt_id: SCSI ID of the target.
10340  * @lun_id: LUN ID of the scsi device.
10341  * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
10342  *
10343  * This function sends an abort command for every SCSI command
10344  * associated with the given virtual port pending on the ring
10345  * filtered by lpfc_sli_validate_fcp_iocb function.
10346  * When cmd == LPFC_CTX_LUN, the function sends abort only to the
10347  * FCP iocbs associated with lun specified by tgt_id and lun_id
10348  * parameters
10349  * When cmd == LPFC_CTX_TGT, the function sends abort only to the
10350  * FCP iocbs associated with SCSI target specified by tgt_id parameter.
10351  * When cmd == LPFC_CTX_HOST, the function sends abort to all
10352  * FCP iocbs associated with virtual port.
10353  * This function returns the number of iocbs it aborted.
10354  * This function is called with no locks held right after a taskmgmt
10355  * command is sent.
10356  **/
10357 int
10358 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
10359 			uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
10360 {
10361 	struct lpfc_hba *phba = vport->phba;
10362 	struct lpfc_scsi_buf *lpfc_cmd;
10363 	struct lpfc_iocbq *abtsiocbq;
10364 	struct lpfc_nodelist *ndlp;
10365 	struct lpfc_iocbq *iocbq;
10366 	IOCB_t *icmd;
10367 	int sum, i, ret_val;
10368 	unsigned long iflags;
10369 	struct lpfc_sli_ring *pring_s4;
10370 	uint32_t ring_number;
10371 
10372 	spin_lock_irq(&phba->hbalock);
10373 
10374 	/* all I/Os are in process of being flushed */
10375 	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
10376 		spin_unlock_irq(&phba->hbalock);
10377 		return 0;
10378 	}
10379 	sum = 0;
10380 
10381 	for (i = 1; i <= phba->sli.last_iotag; i++) {
10382 		iocbq = phba->sli.iocbq_lookup[i];
10383 
10384 		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
10385 					       cmd) != 0)
10386 			continue;
10387 
10388 		/*
10389 		 * If the iocbq is already being aborted, don't take a second
10390 		 * action, but do count it.
10391 		 */
10392 		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
10393 			continue;
10394 
10395 		/* issue ABTS for this IOCB based on iotag */
10396 		abtsiocbq = __lpfc_sli_get_iocbq(phba);
10397 		if (abtsiocbq == NULL)
10398 			continue;
10399 
10400 		icmd = &iocbq->iocb;
10401 		abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
10402 		abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
10403 		if (phba->sli_rev == LPFC_SLI_REV4)
10404 			abtsiocbq->iocb.un.acxri.abortIoTag =
10405 							 iocbq->sli4_xritag;
10406 		else
10407 			abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
10408 		abtsiocbq->iocb.ulpLe = 1;
10409 		abtsiocbq->iocb.ulpClass = icmd->ulpClass;
10410 		abtsiocbq->vport = vport;
10411 
10412 		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
10413 		abtsiocbq->fcp_wqidx = iocbq->fcp_wqidx;
10414 		if (iocbq->iocb_flag & LPFC_IO_FCP)
10415 			abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
10416 		if (iocbq->iocb_flag & LPFC_IO_FOF)
10417 			abtsiocbq->iocb_flag |= LPFC_IO_FOF;
10418 
10419 		lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
10420 		ndlp = lpfc_cmd->rdata->pnode;
10421 
10422 		if (lpfc_is_link_up(phba) &&
10423 		    (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
10424 			abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
10425 		else
10426 			abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
10427 
10428 		/* Setup callback routine and issue the command. */
10429 		abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
10430 
10431 		/*
10432 		 * Indicate the IO is being aborted by the driver and set
10433 		 * the caller's flag into the aborted IO.
10434 		 */
10435 		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
10436 
10437 		if (phba->sli_rev == LPFC_SLI_REV4) {
10438 			ring_number = MAX_SLI3_CONFIGURED_RINGS +
10439 					 iocbq->fcp_wqidx;
10440 			pring_s4 = &phba->sli.ring[ring_number];
10441 			/* Note: both hbalock and ring_lock must be held here */
10442 			spin_lock_irqsave(&pring_s4->ring_lock, iflags);
10443 			ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
10444 							abtsiocbq, 0);
10445 			spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
10446 		} else {
10447 			ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
10448 							abtsiocbq, 0);
10449 		}
10450 
10451 
10452 		if (ret_val == IOCB_ERROR)
10453 			__lpfc_sli_release_iocbq(phba, abtsiocbq);
10454 		else
10455 			sum++;
10456 	}
10457 	spin_unlock_irq(&phba->hbalock);
10458 	return sum;
10459 }
10460 
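/*
 * Example (illustrative sketch, not part of the driver): after sending a
 * LUN reset task management command, request ABTS for every command
 * still outstanding on that LUN and report how many aborts were issued.
 */
static int lpfc_example_flush_lun(struct lpfc_vport *vport,
				  struct lpfc_sli_ring *pring,
				  uint16_t tgt_id, uint64_t lun_id)
{
	return lpfc_sli_abort_taskmgmt(vport, pring, tgt_id, lun_id,
				       LPFC_CTX_LUN);
}
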
10461 /**
10462  * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
10463  * @phba: Pointer to HBA context object.
10464  * @cmdiocbq: Pointer to command iocb.
10465  * @rspiocbq: Pointer to response iocb.
10466  *
10467  * This function is the completion handler for iocbs issued using
10468  * lpfc_sli_issue_iocb_wait function. This function is called by the
10469  * ring event handler function without any lock held. This function
10470  * can be called from both worker thread context and interrupt
10471  * context. This function can also be called from another thread which
10472  * cleans up the SLI layer objects.
10473  * This function copies the contents of the response iocb to the
10474  * response iocb memory object provided by the caller of
10475  * lpfc_sli_issue_iocb_wait and then wakes up the thread which
10476  * sleeps for the iocb completion.
10477  **/
10478 static void
10479 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
10480 			struct lpfc_iocbq *cmdiocbq,
10481 			struct lpfc_iocbq *rspiocbq)
10482 {
10483 	wait_queue_head_t *pdone_q;
10484 	unsigned long iflags;
10485 	struct lpfc_scsi_buf *lpfc_cmd;
10486 
10487 	spin_lock_irqsave(&phba->hbalock, iflags);
10488 	if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
10489 
10490 		/*
10491 		 * A time out has occurred for the iocb.  If a time out
10492 		 * completion handler has been supplied, call it.  Otherwise,
10493 		 * just free the iocbq.
10494 		 */
10495 
10496 		spin_unlock_irqrestore(&phba->hbalock, iflags);
10497 		cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
10498 		cmdiocbq->wait_iocb_cmpl = NULL;
10499 		if (cmdiocbq->iocb_cmpl)
10500 			(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
10501 		else
10502 			lpfc_sli_release_iocbq(phba, cmdiocbq);
10503 		return;
10504 	}
10505 
10506 	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
10507 	if (cmdiocbq->context2 && rspiocbq)
10508 		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
10509 		       &rspiocbq->iocb, sizeof(IOCB_t));
10510 
10511 	/* Set the exchange busy flag for task management commands */
10512 	if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
10513 		!(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
10514 		lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
10515 			cur_iocbq);
10516 		lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
10517 	}
10518 
10519 	pdone_q = cmdiocbq->context_un.wait_queue;
10520 	if (pdone_q)
10521 		wake_up(pdone_q);
10522 	spin_unlock_irqrestore(&phba->hbalock, iflags);
10523 	return;
10524 }
10525 
10526 /**
10527  * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
10528  * @phba: Pointer to HBA context object..
10529  * @piocbq: Pointer to command iocb.
10530  * @flag: Flag to test.
10531  *
10532  * This routine grabs the hbalock and then test the iocb_flag to
10533  * see if the passed in flag is set.
10534  * Returns:
10535  * 1 if flag is set.
10536  * 0 if flag is not set.
10537  **/
10538 static int
10539 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
10540 		 struct lpfc_iocbq *piocbq, uint32_t flag)
10541 {
10542 	unsigned long iflags;
10543 	int ret;
10544 
10545 	spin_lock_irqsave(&phba->hbalock, iflags);
10546 	ret = piocbq->iocb_flag & flag;
10547 	spin_unlock_irqrestore(&phba->hbalock, iflags);
10548 	return ret;
10549 
10550 }
10551 
10552 /**
10553  * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
10554  * @phba: Pointer to HBA context object..
10555  * @ring_number: SLI ring number on which to issue the iocb.
10556  * @piocb: Pointer to command iocb.
10557  * @prspiocbq: Pointer to response iocb.
10558  * @timeout: Timeout in number of seconds.
10559  *
10560  * This function issues the iocb to firmware and waits for the
10561  * iocb to complete. The iocb_cmpl field of the iocb shall be used
10562  * to handle iocbs which time out. If the field is NULL, the
10563  * function shall free the iocbq structure.  If more clean up is
10564  * needed, the caller is expected to provide a completion function
10565  * that will provide the needed clean up.  If the iocb command is
10566  * not completed within timeout seconds, the function will either
10567  * free the iocbq structure (if iocb_cmpl == NULL) or execute the
10568  * completion function set in the iocb_cmpl field and then return
10569  * a status of IOCB_TIMEDOUT.  The caller should not free the iocb
10570  * resources if this function returns IOCB_TIMEDOUT.
10571  * The function waits for the iocb completion using a
10572  * non-interruptible wait.
10573  * This function will sleep while waiting for iocb completion.
10574  * So, this function should not be called from any context which
10575  * does not allow sleeping. Due to the same reason, this function
10576  * cannot be called with interrupt disabled.
10577  * This function assumes that the iocb completions occur while
10578  * this function sleeps. So, this function cannot be called from
10579  * the thread which processes iocb completion for this ring.
10580  * This function clears the iocb_flag of the iocb object before
10581  * issuing the iocb and the iocb completion handler sets this
10582  * flag and wakes this thread when the iocb completes.
10583  * The contents of the response iocb will be copied to prspiocbq
10584  * by the completion handler when the command completes.
10585  * This function returns IOCB_SUCCESS when success.
10586  * This function is called with no lock held.
10587  **/
10588 int
10589 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
10590 			 uint32_t ring_number,
10591 			 struct lpfc_iocbq *piocb,
10592 			 struct lpfc_iocbq *prspiocbq,
10593 			 uint32_t timeout)
10594 {
10595 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
10596 	long timeleft, timeout_req = 0;
10597 	int retval = IOCB_SUCCESS;
10598 	uint32_t creg_val;
10599 	struct lpfc_iocbq *iocb;
10600 	int txq_cnt = 0;
10601 	int txcmplq_cnt = 0;
10602 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
10603 	unsigned long iflags;
10604 	bool iocb_completed = true;
10605 
10606 	/*
10607 	 * If the caller has provided a response iocbq buffer, then context2
10608 	 * must be NULL or it's an error.
10609 	 */
10610 	if (prspiocbq) {
10611 		if (piocb->context2)
10612 			return IOCB_ERROR;
10613 		piocb->context2 = prspiocbq;
10614 	}
10615 
10616 	piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
10617 	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
10618 	piocb->context_un.wait_queue = &done_q;
10619 	piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
10620 
10621 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
10622 		if (lpfc_readl(phba->HCregaddr, &creg_val))
10623 			return IOCB_ERROR;
10624 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
10625 		writel(creg_val, phba->HCregaddr);
10626 		readl(phba->HCregaddr); /* flush */
10627 	}
10628 
10629 	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
10630 				     SLI_IOCB_RET_IOCB);
10631 	if (retval == IOCB_SUCCESS) {
10632 		timeout_req = msecs_to_jiffies(timeout * 1000);
10633 		timeleft = wait_event_timeout(done_q,
10634 				lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
10635 				timeout_req);
10636 		spin_lock_irqsave(&phba->hbalock, iflags);
10637 		if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
10638 
10639 			/*
10640 			 * IOCB timed out.  Inform the wake iocb wait
10641 			 * completion function and set local status
10642 			 */
10643 
10644 			iocb_completed = false;
10645 			piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
10646 		}
10647 		spin_unlock_irqrestore(&phba->hbalock, iflags);
10648 		if (iocb_completed) {
10649 			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10650 					"0331 IOCB wake signaled\n");
10651 			/* Note: we are not indicating if the IOCB has a success
10652 			 * status or not - that's for the caller to check.
10653 			 * IOCB_SUCCESS means just that the command was sent and
10654 			 * completed. Not that it completed successfully.
10655 			 */
10656 		} else if (timeleft == 0) {
10657 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10658 					"0338 IOCB wait timeout error - no "
10659 					"wake response Data x%x\n", timeout);
10660 			retval = IOCB_TIMEDOUT;
10661 		} else {
10662 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10663 					"0330 IOCB wake NOT set, "
10664 					"Data x%x x%lx\n",
10665 					timeout, (timeleft / jiffies));
10666 			retval = IOCB_TIMEDOUT;
10667 		}
10668 	} else if (retval == IOCB_BUSY) {
10669 		if (phba->cfg_log_verbose & LOG_SLI) {
10670 			list_for_each_entry(iocb, &pring->txq, list) {
10671 				txq_cnt++;
10672 			}
10673 			list_for_each_entry(iocb, &pring->txcmplq, list) {
10674 				txcmplq_cnt++;
10675 			}
10676 			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10677 				"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
10678 				phba->iocb_cnt, txq_cnt, txcmplq_cnt);
10679 		}
10680 		return retval;
10681 	} else {
10682 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10683 				"0332 IOCB wait issue failed, Data x%x\n",
10684 				retval);
10685 		retval = IOCB_ERROR;
10686 	}
10687 
10688 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
10689 		if (lpfc_readl(phba->HCregaddr, &creg_val))
10690 			return IOCB_ERROR;
10691 		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
10692 		writel(creg_val, phba->HCregaddr);
10693 		readl(phba->HCregaddr); /* flush */
10694 	}
10695 
10696 	if (prspiocbq)
10697 		piocb->context2 = NULL;
10698 
10699 	piocb->context_un.wait_queue = NULL;
10700 	piocb->iocb_cmpl = NULL;
10701 	return retval;
10702 }
10703 
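/*
 * Example (illustrative sketch, not part of the driver): issuing a
 * caller-built ELS iocb synchronously and examining the response copied
 * into a locally allocated response iocbq.  The function name, the 30
 * second timeout and the simplified error handling are illustrative
 * only; "piocb" is assumed to be fully built with context2 left NULL.
 */
static int lpfc_example_issue_els_wait(struct lpfc_hba *phba,
				       struct lpfc_iocbq *piocb)
{
	struct lpfc_iocbq *rspiocbq;
	int rc;

	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq)
		return IOCB_ERROR;

	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, piocb,
				      rspiocbq, 30);
	if (rc == IOCB_SUCCESS && rspiocbq->iocb.ulpStatus != IOSTAT_SUCCESS)
		/* Sent and completed, but the command itself failed. */
		rc = IOCB_ERROR;

	lpfc_sli_release_iocbq(phba, rspiocbq);
	return rc;
}
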
10704 /**
10705  * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
10706  * @phba: Pointer to HBA context object.
10707  * @pmboxq: Pointer to driver mailbox object.
10708  * @timeout: Timeout in number of seconds.
10709  *
10710  * This function issues the mailbox to firmware and waits for the
10711  * mailbox command to complete. If the mailbox command is not
10712  * completed within timeout seconds, it returns MBX_TIMEOUT.
10713  * The function waits for the mailbox completion using an
10714  * interruptible wait. If the thread is woken up due to a
10715  * signal, MBX_TIMEOUT error is returned to the caller. Caller
10716  * should not free the mailbox resources, if this function returns
10717  * MBX_TIMEOUT.
10718  * This function will sleep while waiting for mailbox completion.
10719  * So, this function should not be called from any context which
10720  * does not allow sleeping. Due to the same reason, this function
10721  * cannot be called with interrupt disabled.
10722  * This function assumes that the mailbox completion occurs while
10723  * this function sleeps. So, this function cannot be called from
10724  * the worker thread which processes mailbox completion.
10725  * This function is called in the context of HBA management
10726  * applications.
10727  * This function returns MBX_SUCCESS when successful.
10728  * This function is called with no lock held.
10729  **/
10730 int
10731 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
10732 			 uint32_t timeout)
10733 {
10734 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
10735 	MAILBOX_t *mb = NULL;
10736 	int retval;
10737 	unsigned long flag;
10738 
10739 	/* The caller might set context1 for extended buffer */
10740 	if (pmboxq->context1)
10741 		mb = (MAILBOX_t *)pmboxq->context1;
10742 
10743 	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
10744 	/* setup wake call as IOCB callback */
10745 	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
10746 	/* setup context field to pass wait_queue pointer to wake function  */
10747 	pmboxq->context1 = &done_q;
10748 
10749 	/* now issue the command */
10750 	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
10751 	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
10752 		wait_event_interruptible_timeout(done_q,
10753 				pmboxq->mbox_flag & LPFC_MBX_WAKE,
10754 				msecs_to_jiffies(timeout * 1000));
10755 
10756 		spin_lock_irqsave(&phba->hbalock, flag);
10757 		/* restore the possible extended buffer so its resources can be freed */
10758 		pmboxq->context1 = (uint8_t *)mb;
10759 		/*
10760 		 * if the LPFC_MBX_WAKE flag is set, the mailbox completed;
10761 		 * otherwise, do not free the resources.
10762 		 */
10763 		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
10764 			retval = MBX_SUCCESS;
10765 		} else {
10766 			retval = MBX_TIMEOUT;
10767 			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10768 		}
10769 		spin_unlock_irqrestore(&phba->hbalock, flag);
10770 	} else {
10771 		/* restore the possible extended buffer so its resources can be freed */
10772 		pmboxq->context1 = (uint8_t *)mb;
10773 	}
10774 
10775 	return retval;
10776 }
10777 
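/*
 * Example (illustrative sketch, not part of the driver): issuing a
 * READ_REV mailbox command synchronously.  lpfc_read_rev() is the
 * driver's existing helper that builds the mailbox; LPFC_MBOX_TMO is the
 * standard mailbox timeout in seconds.  On MBX_TIMEOUT the mailbox
 * memory is left alone because the command may still complete later.
 */
static int lpfc_example_read_rev_wait(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int rc;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;
	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));

	lpfc_read_rev(phba, pmboxq);
	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if (rc != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
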
10778 /**
10779  * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
10780  * @phba: Pointer to HBA context.
10781  *
10782  * This function is called to shutdown the driver's mailbox sub-system.
10783  * It first marks the mailbox sub-system as blocked to prevent
10784  * asynchronous mailbox commands from being issued off the pending mailbox
10785  * command queue. If the mailbox command sub-system shutdown is due to
10786  * HBA error conditions such as EEH or ERATT, this routine shall invoke
10787  * the mailbox sub-system flush routine to forcefully bring down the
10788  * mailbox sub-system. Otherwise, if it is due to normal condition (such
10789  * as with offline or HBA function reset), this routine will wait for the
10790  * outstanding mailbox command to complete before invoking the mailbox
10791  * sub-system flush routine to gracefully bring down mailbox sub-system.
10792  **/
10793 void
10794 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
10795 {
10796 	struct lpfc_sli *psli = &phba->sli;
10797 	unsigned long timeout;
10798 
10799 	if (mbx_action == LPFC_MBX_NO_WAIT) {
10800 		/* delay 100ms for port state */
10801 		msleep(100);
10802 		lpfc_sli_mbox_sys_flush(phba);
10803 		return;
10804 	}
10805 	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
10806 
10807 	spin_lock_irq(&phba->hbalock);
10808 	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
10809 
10810 	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
10811 		/* Determine how long we might wait for the active mailbox
10812 		 * command to be gracefully completed by firmware.
10813 		 */
10814 		if (phba->sli.mbox_active)
10815 			timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
10816 						phba->sli.mbox_active) *
10817 						1000) + jiffies;
10818 		spin_unlock_irq(&phba->hbalock);
10819 
10820 		while (phba->sli.mbox_active) {
10821 			/* Check active mailbox complete status every 2ms */
10822 			msleep(2);
10823 			if (time_after(jiffies, timeout))
10824 				/* Timeout, let the mailbox flush routine
10825 				 * forcefully release the active mailbox command
10826 				 */
10827 				break;
10828 		}
10829 	} else
10830 		spin_unlock_irq(&phba->hbalock);
10831 
10832 	lpfc_sli_mbox_sys_flush(phba);
10833 }
10834 
10835 /**
10836  * lpfc_sli_eratt_read - read sli-3 error attention events
10837  * @phba: Pointer to HBA context.
10838  *
10839  * This function is called to read the SLI3 device error attention registers
10840  * for possible error attention events. The caller must hold the hbalock
10841  * with spin_lock_irq().
10842  *
10843  * This function returns 1 when there is Error Attention in the Host Attention
10844  * Register and returns 0 otherwise.
10845  **/
10846 static int
10847 lpfc_sli_eratt_read(struct lpfc_hba *phba)
10848 {
10849 	uint32_t ha_copy;
10850 
10851 	/* Read chip Host Attention (HA) register */
10852 	if (lpfc_readl(phba->HAregaddr, &ha_copy))
10853 		goto unplug_err;
10854 
10855 	if (ha_copy & HA_ERATT) {
10856 		/* Read host status register to retrieve error event */
10857 		if (lpfc_sli_read_hs(phba))
10858 			goto unplug_err;
10859 
10860 		/* Check if a deferred error condition is active */
10861 		if ((HS_FFER1 & phba->work_hs) &&
10862 		    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
10863 		      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
10864 			phba->hba_flag |= DEFER_ERATT;
10865 			/* Clear all interrupt enable conditions */
10866 			writel(0, phba->HCregaddr);
10867 			readl(phba->HCregaddr);
10868 		}
10869 
10870 		/* Set the driver HA work bitmap */
10871 		phba->work_ha |= HA_ERATT;
10872 		/* Indicate polling handles this ERATT */
10873 		phba->hba_flag |= HBA_ERATT_HANDLED;
10874 		return 1;
10875 	}
10876 	return 0;
10877 
10878 unplug_err:
10879 	/* Set the driver HS work bitmap */
10880 	phba->work_hs |= UNPLUG_ERR;
10881 	/* Set the driver HA work bitmap */
10882 	phba->work_ha |= HA_ERATT;
10883 	/* Indicate polling handles this ERATT */
10884 	phba->hba_flag |= HBA_ERATT_HANDLED;
10885 	return 1;
10886 }
10887 
10888 /**
10889  * lpfc_sli4_eratt_read - read sli-4 error attention events
10890  * @phba: Pointer to HBA context.
10891  *
10892  * This function is called to read the SLI4 device error attention registers
10893  * for possible error attention events. The caller must hold the hbalock
10894  * with spin_lock_irq().
10895  *
10896  * This function returns 1 when there is Error Attention in the Host Attention
10897  * Register and returns 0 otherwise.
10898  **/
10899 static int
10900 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
10901 {
10902 	uint32_t uerr_sta_hi, uerr_sta_lo;
10903 	uint32_t if_type, portsmphr;
10904 	struct lpfc_register portstat_reg;
10905 
10906 	/*
10907 	 * For now, use the SLI4 device internal unrecoverable error
10908 	 * registers for error attention. This can be changed later.
10909 	 */
10910 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10911 	switch (if_type) {
10912 	case LPFC_SLI_INTF_IF_TYPE_0:
10913 		if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
10914 			&uerr_sta_lo) ||
10915 			lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
10916 			&uerr_sta_hi)) {
10917 			phba->work_hs |= UNPLUG_ERR;
10918 			phba->work_ha |= HA_ERATT;
10919 			phba->hba_flag |= HBA_ERATT_HANDLED;
10920 			return 1;
10921 		}
10922 		if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
10923 		    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
10924 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10925 					"1423 HBA Unrecoverable error: "
10926 					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
10927 					"ue_mask_lo_reg=0x%x, "
10928 					"ue_mask_hi_reg=0x%x\n",
10929 					uerr_sta_lo, uerr_sta_hi,
10930 					phba->sli4_hba.ue_mask_lo,
10931 					phba->sli4_hba.ue_mask_hi);
10932 			phba->work_status[0] = uerr_sta_lo;
10933 			phba->work_status[1] = uerr_sta_hi;
10934 			phba->work_ha |= HA_ERATT;
10935 			phba->hba_flag |= HBA_ERATT_HANDLED;
10936 			return 1;
10937 		}
10938 		break;
10939 	case LPFC_SLI_INTF_IF_TYPE_2:
10940 		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
10941 			&portstat_reg.word0) ||
10942 			lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
10943 			&portsmphr)){
10944 			phba->work_hs |= UNPLUG_ERR;
10945 			phba->work_ha |= HA_ERATT;
10946 			phba->hba_flag |= HBA_ERATT_HANDLED;
10947 			return 1;
10948 		}
10949 		if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
10950 			phba->work_status[0] =
10951 				readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
10952 			phba->work_status[1] =
10953 				readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
10954 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10955 					"2885 Port Status Event: "
10956 					"port status reg 0x%x, "
10957 					"port smphr reg 0x%x, "
10958 					"error 1=0x%x, error 2=0x%x\n",
10959 					portstat_reg.word0,
10960 					portsmphr,
10961 					phba->work_status[0],
10962 					phba->work_status[1]);
10963 			phba->work_ha |= HA_ERATT;
10964 			phba->hba_flag |= HBA_ERATT_HANDLED;
10965 			return 1;
10966 		}
10967 		break;
10968 	case LPFC_SLI_INTF_IF_TYPE_1:
10969 	default:
10970 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10971 				"2886 HBA Error Attention on unsupported "
10972 				"if type %d.", if_type);
10973 		return 1;
10974 	}
10975 
10976 	return 0;
10977 }
10978 
10979 /**
10980  * lpfc_sli_check_eratt - check error attention events
10981  * @phba: Pointer to HBA context.
10982  *
10983  * This function is called from timer soft interrupt context to check HBA's
10984  * error attention register bit for error attention events.
10985  *
10986  * This function returns 1 when there is Error Attention in the Host Attention
10987  * Register and returns 0 otherwise.
10988  **/
10989 int
10990 lpfc_sli_check_eratt(struct lpfc_hba *phba)
10991 {
10992 	uint32_t ha_copy;
10993 
10994 	/* If somebody is waiting to handle an eratt, don't process it
10995 	 * here. The brdkill function will do this.
10996 	 */
10997 	if (phba->link_flag & LS_IGNORE_ERATT)
10998 		return 0;
10999 
11000 	/* Check if interrupt handler handles this ERATT */
11001 	spin_lock_irq(&phba->hbalock);
11002 	if (phba->hba_flag & HBA_ERATT_HANDLED) {
11003 		/* Interrupt handler has handled ERATT */
11004 		spin_unlock_irq(&phba->hbalock);
11005 		return 0;
11006 	}
11007 
11008 	/*
11009 	 * If there is deferred error attention, do not check for error
11010 	 * attention
11011 	 */
11012 	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11013 		spin_unlock_irq(&phba->hbalock);
11014 		return 0;
11015 	}
11016 
11017 	/* If PCI channel is offline, don't process it */
11018 	if (unlikely(pci_channel_offline(phba->pcidev))) {
11019 		spin_unlock_irq(&phba->hbalock);
11020 		return 0;
11021 	}
11022 
11023 	switch (phba->sli_rev) {
11024 	case LPFC_SLI_REV2:
11025 	case LPFC_SLI_REV3:
11026 		/* Read chip Host Attention (HA) register */
11027 		ha_copy = lpfc_sli_eratt_read(phba);
11028 		break;
11029 	case LPFC_SLI_REV4:
11030 		/* Read device Unrecoverable Error (UERR) registers */
11031 		ha_copy = lpfc_sli4_eratt_read(phba);
11032 		break;
11033 	default:
11034 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11035 				"0299 Invalid SLI revision (%d)\n",
11036 				phba->sli_rev);
11037 		ha_copy = 0;
11038 		break;
11039 	}
11040 	spin_unlock_irq(&phba->hbalock);
11041 
11042 	return ha_copy;
11043 }
11044 
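/*
 * Example (illustrative sketch, not part of the driver): how a polling
 * context, such as an error-attention timer, might use
 * lpfc_sli_check_eratt() and hand any detected event to the worker
 * thread via the driver's existing lpfc_worker_wake_up() helper.
 */
static void lpfc_example_poll_for_eratt(struct lpfc_hba *phba)
{
	if (lpfc_sli_check_eratt(phba))
		/* Error attention found; let the worker thread handle it. */
		lpfc_worker_wake_up(phba);
}
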
11045 /**
11046  * lpfc_intr_state_check - Check device state for interrupt handling
11047  * @phba: Pointer to HBA context.
11048  *
11049  * This inline routine checks whether a device or its PCI slot is in a state
11050  * that the interrupt should be handled.
11051  *
11052  * This function returns 0 if the device or the PCI slot is in a state that
11053  * interrupt should be handled, otherwise -EIO.
11054  */
11055 static inline int
11056 lpfc_intr_state_check(struct lpfc_hba *phba)
11057 {
11058 	/* If the pci channel is offline, ignore all the interrupts */
11059 	if (unlikely(pci_channel_offline(phba->pcidev)))
11060 		return -EIO;
11061 
11062 	/* Update device level interrupt statistics */
11063 	phba->sli.slistat.sli_intr++;
11064 
11065 	/* Ignore all interrupts during initialization. */
11066 	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
11067 		return -EIO;
11068 
11069 	return 0;
11070 }
11071 
11072 /**
11073  * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
11074  * @irq: Interrupt number.
11075  * @dev_id: The device context pointer.
11076  *
11077  * This function is directly called from the PCI layer as an interrupt
11078  * service routine when device with SLI-3 interface spec is enabled with
11079  * MSI-X multi-message interrupt mode and there are slow-path events in
11080  * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
11081  * interrupt mode, this function is called as part of the device-level
11082  * interrupt handler. When the PCI slot is in error recovery or the HBA
11083  * is undergoing initialization, the interrupt handler will not process
11084  * the interrupt. The link attention and ELS ring attention events are
11085  * handled by the worker thread. The interrupt handler signals the worker
11086  * thread and returns for these events. This function is called without
11087  * any lock held. It gets the hbalock to access and update SLI data
11088  * structures.
11089  *
11090  * This function returns IRQ_HANDLED when interrupt is handled else it
11091  * returns IRQ_NONE.
11092  **/
11093 irqreturn_t
11094 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
11095 {
11096 	struct lpfc_hba  *phba;
11097 	uint32_t ha_copy, hc_copy;
11098 	uint32_t work_ha_copy;
11099 	unsigned long status;
11100 	unsigned long iflag;
11101 	uint32_t control;
11102 
11103 	MAILBOX_t *mbox, *pmbox;
11104 	struct lpfc_vport *vport;
11105 	struct lpfc_nodelist *ndlp;
11106 	struct lpfc_dmabuf *mp;
11107 	LPFC_MBOXQ_t *pmb;
11108 	int rc;
11109 
11110 	/*
11111 	 * Get the driver's phba structure from the dev_id and
11112 	 * assume the HBA is not interrupting.
11113 	 */
11114 	phba = (struct lpfc_hba *)dev_id;
11115 
11116 	if (unlikely(!phba))
11117 		return IRQ_NONE;
11118 
11119 	/*
11120 	 * Stuff needs to be attended to when this function is invoked as an
11121 	 * individual interrupt handler in MSI-X multi-message interrupt mode
11122 	 */
11123 	if (phba->intr_type == MSIX) {
11124 		/* Check device state for handling interrupt */
11125 		if (lpfc_intr_state_check(phba))
11126 			return IRQ_NONE;
11127 		/* Need to read HA REG for slow-path events */
11128 		spin_lock_irqsave(&phba->hbalock, iflag);
11129 		if (lpfc_readl(phba->HAregaddr, &ha_copy))
11130 			goto unplug_error;
11131 		/* If somebody is waiting to handle an eratt don't process it
11132 		 * here. The brdkill function will do this.
11133 		 */
11134 		if (phba->link_flag & LS_IGNORE_ERATT)
11135 			ha_copy &= ~HA_ERATT;
11136 		/* Check the need for handling ERATT in interrupt handler */
11137 		if (ha_copy & HA_ERATT) {
11138 			if (phba->hba_flag & HBA_ERATT_HANDLED)
11139 				/* ERATT polling has handled ERATT */
11140 				ha_copy &= ~HA_ERATT;
11141 			else
11142 				/* Indicate interrupt handler handles ERATT */
11143 				phba->hba_flag |= HBA_ERATT_HANDLED;
11144 		}
11145 
11146 		/*
11147 		 * If there is deferred error attention, do not check for any
11148 		 * interrupt.
11149 		 */
11150 		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11151 			spin_unlock_irqrestore(&phba->hbalock, iflag);
11152 			return IRQ_NONE;
11153 		}
11154 
11155 		/* Clear up only attention source related to slow-path */
11156 		if (lpfc_readl(phba->HCregaddr, &hc_copy))
11157 			goto unplug_error;
11158 
11159 		writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
11160 			HC_LAINT_ENA | HC_ERINT_ENA),
11161 			phba->HCregaddr);
11162 		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
11163 			phba->HAregaddr);
11164 		writel(hc_copy, phba->HCregaddr);
11165 		readl(phba->HAregaddr); /* flush */
11166 		spin_unlock_irqrestore(&phba->hbalock, iflag);
11167 	} else
11168 		ha_copy = phba->ha_copy;
11169 
11170 	work_ha_copy = ha_copy & phba->work_ha_mask;
11171 
11172 	if (work_ha_copy) {
11173 		if (work_ha_copy & HA_LATT) {
11174 			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
11175 				/*
11176 				 * Turn off Link Attention interrupts
11177 				 * until CLEAR_LA done
11178 				 */
11179 				spin_lock_irqsave(&phba->hbalock, iflag);
11180 				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
11181 				if (lpfc_readl(phba->HCregaddr, &control))
11182 					goto unplug_error;
11183 				control &= ~HC_LAINT_ENA;
11184 				writel(control, phba->HCregaddr);
11185 				readl(phba->HCregaddr); /* flush */
11186 				spin_unlock_irqrestore(&phba->hbalock, iflag);
11187 			}
11188 			else
11189 				work_ha_copy &= ~HA_LATT;
11190 		}
11191 
11192 		if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
11193 			/*
11194 			 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
11195 			 * the only slow ring.
11196 			 */
11197 			status = (work_ha_copy &
11198 				(HA_RXMASK  << (4*LPFC_ELS_RING)));
11199 			status >>= (4*LPFC_ELS_RING);
11200 			if (status & HA_RXMASK) {
11201 				spin_lock_irqsave(&phba->hbalock, iflag);
11202 				if (lpfc_readl(phba->HCregaddr, &control))
11203 					goto unplug_error;
11204 
11205 				lpfc_debugfs_slow_ring_trc(phba,
11206 				"ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
11207 				control, status,
11208 				(uint32_t)phba->sli.slistat.sli_intr);
11209 
11210 				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
11211 					lpfc_debugfs_slow_ring_trc(phba,
11212 						"ISR Disable ring:"
11213 						"pwork:x%x hawork:x%x wait:x%x",
11214 						phba->work_ha, work_ha_copy,
11215 						(uint32_t)((unsigned long)
11216 						&phba->work_waitq));
11217 
11218 					control &=
11219 					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
11220 					writel(control, phba->HCregaddr);
11221 					readl(phba->HCregaddr); /* flush */
11222 				}
11223 				else {
11224 					lpfc_debugfs_slow_ring_trc(phba,
11225 						"ISR slow ring:   pwork:"
11226 						"x%x hawork:x%x wait:x%x",
11227 						phba->work_ha, work_ha_copy,
11228 						(uint32_t)((unsigned long)
11229 						&phba->work_waitq));
11230 				}
11231 				spin_unlock_irqrestore(&phba->hbalock, iflag);
11232 			}
11233 		}
11234 		spin_lock_irqsave(&phba->hbalock, iflag);
11235 		if (work_ha_copy & HA_ERATT) {
11236 			if (lpfc_sli_read_hs(phba))
11237 				goto unplug_error;
11238 			/*
11239 			 * Check whether a deferred error
11240 			 * condition is active
11241 			 */
11242 			if ((HS_FFER1 & phba->work_hs) &&
11243 				((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
11244 				  HS_FFER6 | HS_FFER7 | HS_FFER8) &
11245 				  phba->work_hs)) {
11246 				phba->hba_flag |= DEFER_ERATT;
11247 				/* Clear all interrupt enable conditions */
11248 				writel(0, phba->HCregaddr);
11249 				readl(phba->HCregaddr);
11250 			}
11251 		}
11252 
11253 		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
11254 			pmb = phba->sli.mbox_active;
11255 			pmbox = &pmb->u.mb;
11256 			mbox = phba->mbox;
11257 			vport = pmb->vport;
11258 
11259 			/* First check out the status word */
11260 			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
11261 			if (pmbox->mbxOwner != OWN_HOST) {
11262 				spin_unlock_irqrestore(&phba->hbalock, iflag);
11263 				/*
11264 				 * Stray Mailbox Interrupt, mbxCommand <cmd>
11265 				 * mbxStatus <status>
11266 				 */
11267 				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
11268 						LOG_SLI,
11269 						"(%d):0304 Stray Mailbox "
11270 						"Interrupt mbxCommand x%x "
11271 						"mbxStatus x%x\n",
11272 						(vport ? vport->vpi : 0),
11273 						pmbox->mbxCommand,
11274 						pmbox->mbxStatus);
11275 				/* clear mailbox attention bit */
11276 				work_ha_copy &= ~HA_MBATT;
11277 			} else {
11278 				phba->sli.mbox_active = NULL;
11279 				spin_unlock_irqrestore(&phba->hbalock, iflag);
11280 				phba->last_completion_time = jiffies;
11281 				del_timer(&phba->sli.mbox_tmo);
11282 				if (pmb->mbox_cmpl) {
11283 					lpfc_sli_pcimem_bcopy(mbox, pmbox,
11284 							MAILBOX_CMD_SIZE);
11285 					if (pmb->out_ext_byte_len &&
11286 						pmb->context2)
11287 						lpfc_sli_pcimem_bcopy(
11288 						phba->mbox_ext,
11289 						pmb->context2,
11290 						pmb->out_ext_byte_len);
11291 				}
11292 				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
11293 					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
11294 
11295 					lpfc_debugfs_disc_trc(vport,
11296 						LPFC_DISC_TRC_MBOX_VPORT,
11297 						"MBOX dflt rpi: : "
11298 						"status:x%x rpi:x%x",
11299 						(uint32_t)pmbox->mbxStatus,
11300 						pmbox->un.varWords[0], 0);
11301 
11302 					if (!pmbox->mbxStatus) {
11303 						mp = (struct lpfc_dmabuf *)
11304 							(pmb->context1);
11305 						ndlp = (struct lpfc_nodelist *)
11306 							pmb->context2;
11307 
11308 						/* Reg_LOGIN of dflt RPI was
11309 						 * successful. Now let's get
11310 						 * rid of the RPI using the
11311 						 * same mbox buffer.
11312 						 */
11313 						lpfc_unreg_login(phba,
11314 							vport->vpi,
11315 							pmbox->un.varWords[0],
11316 							pmb);
11317 						pmb->mbox_cmpl =
11318 							lpfc_mbx_cmpl_dflt_rpi;
11319 						pmb->context1 = mp;
11320 						pmb->context2 = ndlp;
11321 						pmb->vport = vport;
11322 						rc = lpfc_sli_issue_mbox(phba,
11323 								pmb,
11324 								MBX_NOWAIT);
11325 						if (rc != MBX_BUSY)
11326 							lpfc_printf_log(phba,
11327 							KERN_ERR,
11328 							LOG_MBOX | LOG_SLI,
11329 							"0350 rc should have"
11330 							"been MBX_BUSY\n");
11331 						if (rc != MBX_NOT_FINISHED)
11332 							goto send_current_mbox;
11333 					}
11334 				}
11335 				spin_lock_irqsave(
11336 						&phba->pport->work_port_lock,
11337 						iflag);
11338 				phba->pport->work_port_events &=
11339 					~WORKER_MBOX_TMO;
11340 				spin_unlock_irqrestore(
11341 						&phba->pport->work_port_lock,
11342 						iflag);
11343 				lpfc_mbox_cmpl_put(phba, pmb);
11344 			}
11345 		} else
11346 			spin_unlock_irqrestore(&phba->hbalock, iflag);
11347 
11348 		if ((work_ha_copy & HA_MBATT) &&
11349 		    (phba->sli.mbox_active == NULL)) {
11350 send_current_mbox:
11351 			/* Process next mailbox command if there is one */
11352 			do {
11353 				rc = lpfc_sli_issue_mbox(phba, NULL,
11354 							 MBX_NOWAIT);
11355 			} while (rc == MBX_NOT_FINISHED);
11356 			if (rc != MBX_SUCCESS)
11357 				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
11358 						LOG_SLI, "0349 rc should be "
11359 						"MBX_SUCCESS\n");
11360 		}
11361 
11362 		spin_lock_irqsave(&phba->hbalock, iflag);
11363 		phba->work_ha |= work_ha_copy;
11364 		spin_unlock_irqrestore(&phba->hbalock, iflag);
11365 		lpfc_worker_wake_up(phba);
11366 	}
11367 	return IRQ_HANDLED;
11368 unplug_error:
11369 	spin_unlock_irqrestore(&phba->hbalock, iflag);
11370 	return IRQ_HANDLED;
11371 
11372 } /* lpfc_sli_sp_intr_handler */
11373 
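/*
 * Editor's note (illustrative only, not part of the driver): in MSI-X mode
 * the slow-path handler above and the fast-path handler below are registered
 * as separate vector handlers.  The real registration lives elsewhere in the
 * driver; the fragment below only sketches the generic request_irq() pattern,
 * and the vector variables and names are assumed for illustration.
 *
 *	rc = request_irq(sp_vector, &lpfc_sli_sp_intr_handler, 0,
 *			 "lpfc-sp", phba);
 *	if (!rc)
 *		rc = request_irq(fp_vector, &lpfc_sli_fp_intr_handler, 0,
 *				 "lpfc-fp", phba);
 */
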
11374 /**
11375  * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
11376  * @irq: Interrupt number.
11377  * @dev_id: The device context pointer.
11378  *
11379  * This function is directly called from the PCI layer as an interrupt
11380  * service routine when device with SLI-3 interface spec is enabled with
11381  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
11382  * ring event in the HBA. However, when the device is enabled with either
11383  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
11384  * device-level interrupt handler. When the PCI slot is in error recovery
11385  * or the HBA is undergoing initialization, the interrupt handler will not
11386  * process the interrupt. The SCSI FCP fast-path ring events are handled in
11387  * the interrupt context. This function is called without any lock held.
11388  * It gets the hbalock to access and update SLI data structures.
11389  *
11390  * This function returns IRQ_HANDLED when interrupt is handled else it
11391  * returns IRQ_NONE.
11392  **/
11393 irqreturn_t
11394 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
11395 {
11396 	struct lpfc_hba  *phba;
11397 	uint32_t ha_copy;
11398 	unsigned long status;
11399 	unsigned long iflag;
11400 
11401 	/* Get the driver's phba structure from the dev_id and
11402 	 * assume the HBA is not interrupting.
11403 	 */
11404 	phba = (struct lpfc_hba *) dev_id;
11405 
11406 	if (unlikely(!phba))
11407 		return IRQ_NONE;
11408 
11409 	/*
11410 	 * Stuff needs to be attended to when this function is invoked as an
11411 	 * individual interrupt handler in MSI-X multi-message interrupt mode
11412 	 */
11413 	if (phba->intr_type == MSIX) {
11414 		/* Check device state for handling interrupt */
11415 		if (lpfc_intr_state_check(phba))
11416 			return IRQ_NONE;
11417 		/* Need to read HA REG for FCP ring and other ring events */
11418 		if (lpfc_readl(phba->HAregaddr, &ha_copy))
11419 			return IRQ_HANDLED;
11420 		/* Clear up only attention source related to fast-path */
11421 		spin_lock_irqsave(&phba->hbalock, iflag);
11422 		/*
11423 		 * If there is deferred error attention, do not check for
11424 		 * any interrupt.
11425 		 */
11426 		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11427 			spin_unlock_irqrestore(&phba->hbalock, iflag);
11428 			return IRQ_NONE;
11429 		}
11430 		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
11431 			phba->HAregaddr);
11432 		readl(phba->HAregaddr); /* flush */
11433 		spin_unlock_irqrestore(&phba->hbalock, iflag);
11434 	} else
11435 		ha_copy = phba->ha_copy;
11436 
11437 	/*
11438 	 * Process all events on FCP ring. Take the optimized path for FCP IO.
11439 	 */
11440 	ha_copy &= ~(phba->work_ha_mask);
11441 
11442 	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
11443 	status >>= (4*LPFC_FCP_RING);
11444 	if (status & HA_RXMASK)
11445 		lpfc_sli_handle_fast_ring_event(phba,
11446 						&phba->sli.ring[LPFC_FCP_RING],
11447 						status);
11448 
11449 	if (phba->cfg_multi_ring_support == 2) {
11450 		/*
11451 		 * Process all events on extra ring. Take the optimized path
11452 		 * for extra ring IO.
11453 		 */
11454 		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
11455 		status >>= (4*LPFC_EXTRA_RING);
11456 		if (status & HA_RXMASK) {
11457 			lpfc_sli_handle_fast_ring_event(phba,
11458 					&phba->sli.ring[LPFC_EXTRA_RING],
11459 					status);
11460 		}
11461 	}
11462 	return IRQ_HANDLED;
11463 }  /* lpfc_sli_fp_intr_handler */
11464 
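/*
 * Editor's note: both SLI-3 handlers above carve the host attention (HA)
 * register into 4-bit per-ring fields and test the receive/completion bits
 * with HA_RXMASK.  A minimal sketch of that extraction, assuming only what
 * the code above already implies (each ring owns the four HA bits starting
 * at bit 4*ring); lpfc_ha_ring_status() is a hypothetical helper used purely
 * for illustration:
 *
 *	static inline unsigned long
 *	lpfc_ha_ring_status(uint32_t ha_copy, int ring)
 *	{
 *		return (ha_copy >> (4 * ring)) & HA_RXMASK;
 *	}
 *
 *	status = lpfc_ha_ring_status(ha_copy, LPFC_FCP_RING);
 *	if (status)
 *		lpfc_sli_handle_fast_ring_event(phba,
 *				&phba->sli.ring[LPFC_FCP_RING], status);
 */
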
11465 /**
11466  * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
11467  * @irq: Interrupt number.
11468  * @dev_id: The device context pointer.
11469  *
11470  * This function is the HBA device-level interrupt handler to device with
11471  * SLI-3 interface spec, called from the PCI layer when either MSI or
11472  * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
11473  * requires driver attention. This function invokes the slow-path interrupt
11474  * attention handling function and fast-path interrupt attention handling
11475  * function in turn to process the relevant HBA attention events. This
11476  * function is called without any lock held. It gets the hbalock to access
11477  * and update SLI data structures.
11478  *
11479  * This function returns IRQ_HANDLED when interrupt is handled, else it
11480  * returns IRQ_NONE.
11481  **/
11482 irqreturn_t
11483 lpfc_sli_intr_handler(int irq, void *dev_id)
11484 {
11485 	struct lpfc_hba  *phba;
11486 	irqreturn_t sp_irq_rc, fp_irq_rc;
11487 	unsigned long status1, status2;
11488 	uint32_t hc_copy;
11489 
11490 	/*
11491 	 * Get the driver's phba structure from the dev_id and
11492 	 * assume the HBA is not interrupting.
11493 	 */
11494 	phba = (struct lpfc_hba *) dev_id;
11495 
11496 	if (unlikely(!phba))
11497 		return IRQ_NONE;
11498 
11499 	/* Check device state for handling interrupt */
11500 	if (lpfc_intr_state_check(phba))
11501 		return IRQ_NONE;
11502 
11503 	spin_lock(&phba->hbalock);
11504 	if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
11505 		spin_unlock(&phba->hbalock);
11506 		return IRQ_HANDLED;
11507 	}
11508 
11509 	if (unlikely(!phba->ha_copy)) {
11510 		spin_unlock(&phba->hbalock);
11511 		return IRQ_NONE;
11512 	} else if (phba->ha_copy & HA_ERATT) {
11513 		if (phba->hba_flag & HBA_ERATT_HANDLED)
11514 			/* ERATT polling has handled ERATT */
11515 			phba->ha_copy &= ~HA_ERATT;
11516 		else
11517 			/* Indicate interrupt handler handles ERATT */
11518 			phba->hba_flag |= HBA_ERATT_HANDLED;
11519 	}
11520 
11521 	/*
11522 	 * If there is deferred error attention, do not check for any interrupt.
11523 	 */
11524 	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11525 		spin_unlock(&phba->hbalock);
11526 		return IRQ_NONE;
11527 	}
11528 
11529 	/* Clear attention sources except link and error attentions */
11530 	if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
11531 		spin_unlock(&phba->hbalock);
11532 		return IRQ_HANDLED;
11533 	}
11534 	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
11535 		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
11536 		phba->HCregaddr);
11537 	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
11538 	writel(hc_copy, phba->HCregaddr);
11539 	readl(phba->HAregaddr); /* flush */
11540 	spin_unlock(&phba->hbalock);
11541 
11542 	/*
11543 	 * Invokes slow-path host attention interrupt handling as appropriate.
11544 	 */
11545 
11546 	/* status of events with mailbox and link attention */
11547 	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
11548 
11549 	/* status of events with ELS ring */
11550 	status2 = (phba->ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
11551 	status2 >>= (4*LPFC_ELS_RING);
11552 
11553 	if (status1 || (status2 & HA_RXMASK))
11554 		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
11555 	else
11556 		sp_irq_rc = IRQ_NONE;
11557 
11558 	/*
11559 	 * Invoke fast-path host attention interrupt handling as appropriate.
11560 	 */
11561 
11562 	/* status of events with FCP ring */
11563 	status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
11564 	status1 >>= (4*LPFC_FCP_RING);
11565 
11566 	/* status of events with extra ring */
11567 	if (phba->cfg_multi_ring_support == 2) {
11568 		status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
11569 		status2 >>= (4*LPFC_EXTRA_RING);
11570 	} else
11571 		status2 = 0;
11572 
11573 	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
11574 		fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
11575 	else
11576 		fp_irq_rc = IRQ_NONE;
11577 
11578 	/* Return device-level interrupt handling status */
11579 	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
11580 }  /* lpfc_sli_intr_handler */
11581 
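/*
 * Editor's note: all three SLI-3 handlers above treat a failed host register
 * read (lpfc_readl() returning non-zero, e.g. after the adapter has been
 * unplugged) as a handled interrupt rather than IRQ_NONE, presumably so the
 * line is not flagged as a spurious interrupt source.  The recurring pattern:
 *
 *	if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
 *		spin_unlock(&phba->hbalock);
 *		return IRQ_HANDLED;
 *	}
 */
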
11582 /**
11583  * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
11584  * @phba: pointer to lpfc hba data structure.
11585  *
11586  * This routine is invoked by the worker thread to process all the pending
11587  * SLI4 FCP abort XRI events.
11588  **/
11589 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
11590 {
11591 	struct lpfc_cq_event *cq_event;
11592 
11593 	/* First, declare the fcp xri abort event has been handled */
11594 	spin_lock_irq(&phba->hbalock);
11595 	phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
11596 	spin_unlock_irq(&phba->hbalock);
11597 	/* Now, handle all the fcp xri abort events */
11598 	while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
11599 		/* Get the first event from the head of the event queue */
11600 		spin_lock_irq(&phba->hbalock);
11601 		list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
11602 				 cq_event, struct lpfc_cq_event, list);
11603 		spin_unlock_irq(&phba->hbalock);
11604 		/* Notify aborted XRI for FCP work queue */
11605 		lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
11606 		/* Free the event processed back to the free pool */
11607 		lpfc_sli4_cq_event_release(phba, cq_event);
11608 	}
11609 }
11610 
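/*
 * Editor's note: this routine and the ELS variant below share a drain
 * pattern: each event is unlinked from its work queue under hbalock, but is
 * processed and released with the lock dropped, so the abort handling never
 * runs under the spinlock.  Condensed sketch (illustration only; "work_list"
 * and "handle_aborted_xri" are placeholders):
 *
 *	while (!list_empty(work_list)) {
 *		spin_lock_irq(&phba->hbalock);
 *		list_remove_head(work_list, cq_event,
 *				 struct lpfc_cq_event, list);
 *		spin_unlock_irq(&phba->hbalock);
 *		handle_aborted_xri(phba, &cq_event->cqe.wcqe_axri);
 *		lpfc_sli4_cq_event_release(phba, cq_event);
 *	}
 */
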
11611 /**
11612  * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
11613  * @phba: pointer to lpfc hba data structure.
11614  *
11615  * This routine is invoked by the worker thread to process all the pending
11616  * SLI4 els abort xri events.
11617  **/
11618 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
11619 {
11620 	struct lpfc_cq_event *cq_event;
11621 
11622 	/* First, declare the els xri abort event has been handled */
11623 	spin_lock_irq(&phba->hbalock);
11624 	phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
11625 	spin_unlock_irq(&phba->hbalock);
11626 	/* Now, handle all the els xri abort events */
11627 	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
11628 		/* Get the first event from the head of the event queue */
11629 		spin_lock_irq(&phba->hbalock);
11630 		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
11631 				 cq_event, struct lpfc_cq_event, list);
11632 		spin_unlock_irq(&phba->hbalock);
11633 		/* Notify aborted XRI for ELS work queue */
11634 		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
11635 		/* Free the event processed back to the free pool */
11636 		lpfc_sli4_cq_event_release(phba, cq_event);
11637 	}
11638 }
11639 
11640 /**
11641  * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
11642  * @phba: pointer to lpfc hba data structure
11643  * @pIocbIn: pointer to the rspiocbq
11644  * @pIocbOut: pointer to the cmdiocbq
11645  * @wcqe: pointer to the complete wcqe
11646  *
11647  * This routine transfers the fields of a command iocbq to a response iocbq
11648  * by copying all the IOCB fields from command iocbq and transferring the
11649  * completion status information from the complete wcqe.
11650  **/
11651 static void
11652 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
11653 			      struct lpfc_iocbq *pIocbIn,
11654 			      struct lpfc_iocbq *pIocbOut,
11655 			      struct lpfc_wcqe_complete *wcqe)
11656 {
11657 	int numBdes, i;
11658 	unsigned long iflags;
11659 	uint32_t status, max_response;
11660 	struct lpfc_dmabuf *dmabuf;
11661 	struct ulp_bde64 *bpl, bde;
11662 	size_t offset = offsetof(struct lpfc_iocbq, iocb);
11663 
11664 	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
11665 	       sizeof(struct lpfc_iocbq) - offset);
11666 	/* Map WCQE parameters into irspiocb parameters */
11667 	status = bf_get(lpfc_wcqe_c_status, wcqe);
11668 	pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
11669 	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
11670 		if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
11671 			pIocbIn->iocb.un.fcpi.fcpi_parm =
11672 					pIocbOut->iocb.un.fcpi.fcpi_parm -
11673 					wcqe->total_data_placed;
11674 		else
11675 			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
11676 	else {
11677 		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
11678 		switch (pIocbOut->iocb.ulpCommand) {
11679 		case CMD_ELS_REQUEST64_CR:
11680 			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
11681 			bpl  = (struct ulp_bde64 *)dmabuf->virt;
11682 			bde.tus.w = le32_to_cpu(bpl[1].tus.w);
11683 			max_response = bde.tus.f.bdeSize;
11684 			break;
11685 		case CMD_GEN_REQUEST64_CR:
11686 			max_response = 0;
11687 			if (!pIocbOut->context3)
11688 				break;
11689 			numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
11690 					sizeof(struct ulp_bde64);
11691 			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
11692 			bpl = (struct ulp_bde64 *)dmabuf->virt;
11693 			for (i = 0; i < numBdes; i++) {
11694 				bde.tus.w = le32_to_cpu(bpl[i].tus.w);
11695 				if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
11696 					max_response += bde.tus.f.bdeSize;
11697 			}
11698 			break;
11699 		default:
11700 			max_response = wcqe->total_data_placed;
11701 			break;
11702 		}
11703 		if (max_response < wcqe->total_data_placed)
11704 			pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
11705 		else
11706 			pIocbIn->iocb.un.genreq64.bdl.bdeSize =
11707 				wcqe->total_data_placed;
11708 	}
11709 
11710 	/* Convert BG errors for completion status */
11711 	if (status == CQE_STATUS_DI_ERROR) {
11712 		pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
11713 
11714 		if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
11715 			pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
11716 		else
11717 			pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
11718 
11719 		pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
11720 		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
11721 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11722 				BGS_GUARD_ERR_MASK;
11723 		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
11724 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11725 				BGS_APPTAG_ERR_MASK;
11726 		if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
11727 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11728 				BGS_REFTAG_ERR_MASK;
11729 
11730 		/* Check to see if there was any good data before the error */
11731 		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
11732 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11733 				BGS_HI_WATER_MARK_PRESENT_MASK;
11734 			pIocbIn->iocb.unsli3.sli3_bg.bghm =
11735 				wcqe->total_data_placed;
11736 		}
11737 
11738 		/*
11739 		* Set ALL the error bits to indicate we don't know what
11740 		* type of error it is.
11741 		*/
11742 		if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
11743 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11744 				(BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
11745 				BGS_GUARD_ERR_MASK);
11746 	}
11747 
11748 	/* Pick up HBA exchange busy condition */
11749 	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
11750 		spin_lock_irqsave(&phba->hbalock, iflags);
11751 		pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
11752 		spin_unlock_irqrestore(&phba->hbalock, iflags);
11753 	}
11754 }
11755 
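/*
 * Editor's note: the offsetof() based memcpy() at the top of
 * lpfc_sli4_iocb_param_transfer() copies the embedded IOCB and every field
 * that follows it from the command iocbq into the response iocbq, while
 * leaving the response's own list linkage, tags and cq_event (which precede
 * the iocb member) untouched.  The idiom in isolation (illustration only;
 * dst/src are placeholders):
 *
 *	size_t off = offsetof(struct lpfc_iocbq, iocb);
 *
 *	memcpy((char *)dst + off, (char *)src + off,
 *	       sizeof(struct lpfc_iocbq) - off);
 */
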
11756 /**
11757  * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
11758  * @phba: Pointer to HBA context object.
11759  * @wcqe: Pointer to work-queue completion queue entry.
11760  *
11761  * This routine handles an ELS work-queue completion event and constructs
11762  * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
11763  * discovery engine to handle.
11764  *
11765  * Return: Pointer to the response IOCBQ, NULL otherwise.
11766  **/
11767 static struct lpfc_iocbq *
11768 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
11769 			       struct lpfc_iocbq *irspiocbq)
11770 {
11771 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
11772 	struct lpfc_iocbq *cmdiocbq;
11773 	struct lpfc_wcqe_complete *wcqe;
11774 	unsigned long iflags;
11775 
11776 	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
11777 	spin_lock_irqsave(&pring->ring_lock, iflags);
11778 	pring->stats.iocb_event++;
11779 	/* Look up the ELS command IOCB and create pseudo response IOCB */
11780 	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
11781 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
11782 	spin_unlock_irqrestore(&pring->ring_lock, iflags);
11783 
11784 	if (unlikely(!cmdiocbq)) {
11785 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11786 				"0386 ELS complete with no corresponding "
11787 				"cmdiocb: iotag (%d)\n",
11788 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
11789 		lpfc_sli_release_iocbq(phba, irspiocbq);
11790 		return NULL;
11791 	}
11792 
11793 	/* Fake the irspiocbq and copy necessary response information */
11794 	lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
11795 
11796 	return irspiocbq;
11797 }
11798 
11799 /**
11800  * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
11801  * @phba: Pointer to HBA context object.
11802  * @mcqe: Pointer to mailbox completion queue entry.
11803  *
11804  * This routine processes a mailbox completion queue entry with an
11805  * asynchronous event.
11806  *
11807  * Return: true if work posted to worker thread, otherwise false.
11808  **/
11809 static bool
11810 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
11811 {
11812 	struct lpfc_cq_event *cq_event;
11813 	unsigned long iflags;
11814 
11815 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11816 			"0392 Async Event: word0:x%x, word1:x%x, "
11817 			"word2:x%x, word3:x%x\n", mcqe->word0,
11818 			mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
11819 
11820 	/* Allocate a new internal CQ_EVENT entry */
11821 	cq_event = lpfc_sli4_cq_event_alloc(phba);
11822 	if (!cq_event) {
11823 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11824 				"0394 Failed to allocate CQ_EVENT entry\n");
11825 		return false;
11826 	}
11827 
11828 	/* Move the CQE into an asynchronous event entry */
11829 	memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
11830 	spin_lock_irqsave(&phba->hbalock, iflags);
11831 	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
11832 	/* Set the async event flag */
11833 	phba->hba_flag |= ASYNC_EVENT;
11834 	spin_unlock_irqrestore(&phba->hbalock, iflags);
11835 
11836 	return true;
11837 }
11838 
11839 /**
11840  * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
11841  * @phba: Pointer to HBA context object.
11842  * @mcqe: Pointer to mailbox completion queue entry.
11843  *
11844  * This routine processes a mailbox completion queue entry with a mailbox
11845  * completion event.
11846  *
11847  * Return: true if work posted to worker thread, otherwise false.
11848  **/
11849 static bool
11850 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
11851 {
11852 	uint32_t mcqe_status;
11853 	MAILBOX_t *mbox, *pmbox;
11854 	struct lpfc_mqe *mqe;
11855 	struct lpfc_vport *vport;
11856 	struct lpfc_nodelist *ndlp;
11857 	struct lpfc_dmabuf *mp;
11858 	unsigned long iflags;
11859 	LPFC_MBOXQ_t *pmb;
11860 	bool workposted = false;
11861 	int rc;
11862 
11863 	/* If not a mailbox completion MCQE, bail out after checking the consumed bit */
11864 	if (!bf_get(lpfc_trailer_completed, mcqe))
11865 		goto out_no_mqe_complete;
11866 
11867 	/* Get the reference to the active mbox command */
11868 	spin_lock_irqsave(&phba->hbalock, iflags);
11869 	pmb = phba->sli.mbox_active;
11870 	if (unlikely(!pmb)) {
11871 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11872 				"1832 No pending MBOX command to handle\n");
11873 		spin_unlock_irqrestore(&phba->hbalock, iflags);
11874 		goto out_no_mqe_complete;
11875 	}
11876 	spin_unlock_irqrestore(&phba->hbalock, iflags);
11877 	mqe = &pmb->u.mqe;
11878 	pmbox = (MAILBOX_t *)&pmb->u.mqe;
11879 	mbox = phba->mbox;
11880 	vport = pmb->vport;
11881 
11882 	/* Reset heartbeat timer */
11883 	phba->last_completion_time = jiffies;
11884 	del_timer(&phba->sli.mbox_tmo);
11885 
11886 	/* Move mbox data to caller's mailbox region, do endian swapping */
11887 	if (pmb->mbox_cmpl && mbox)
11888 		lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
11889 
11890 	/*
11891 	 * For mcqe errors, conditionally move a modified error code to
11892 	 * the mbox so that the error will not be missed.
11893 	 */
11894 	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
11895 	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
11896 		if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
11897 			bf_set(lpfc_mqe_status, mqe,
11898 			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
11899 	}
11900 	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
11901 		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
11902 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
11903 				      "MBOX dflt rpi: status:x%x rpi:x%x",
11904 				      mcqe_status,
11905 				      pmbox->un.varWords[0], 0);
11906 		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
11907 			mp = (struct lpfc_dmabuf *)(pmb->context1);
11908 			ndlp = (struct lpfc_nodelist *)pmb->context2;
11909 			/* Reg_LOGIN of dflt RPI was successful. Now let's get
11910 			 * rid of the RPI using the same mbox buffer.
11911 			 */
11912 			lpfc_unreg_login(phba, vport->vpi,
11913 					 pmbox->un.varWords[0], pmb);
11914 			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
11915 			pmb->context1 = mp;
11916 			pmb->context2 = ndlp;
11917 			pmb->vport = vport;
11918 			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
11919 			if (rc != MBX_BUSY)
11920 				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
11921 						LOG_SLI, "0385 rc should "
11922 						"have been MBX_BUSY\n");
11923 			if (rc != MBX_NOT_FINISHED)
11924 				goto send_current_mbox;
11925 		}
11926 	}
11927 	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
11928 	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
11929 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
11930 
11931 	/* There is mailbox completion work to do */
11932 	spin_lock_irqsave(&phba->hbalock, iflags);
11933 	__lpfc_mbox_cmpl_put(phba, pmb);
11934 	phba->work_ha |= HA_MBATT;
11935 	spin_unlock_irqrestore(&phba->hbalock, iflags);
11936 	workposted = true;
11937 
11938 send_current_mbox:
11939 	spin_lock_irqsave(&phba->hbalock, iflags);
11940 	/* Release the mailbox command posting token */
11941 	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11942 	/* Setting active mailbox pointer need to be in sync to flag clear */
11943 	phba->sli.mbox_active = NULL;
11944 	spin_unlock_irqrestore(&phba->hbalock, iflags);
11945 	/* Wake up worker thread to post the next pending mailbox command */
11946 	lpfc_worker_wake_up(phba);
11947 out_no_mqe_complete:
11948 	if (bf_get(lpfc_trailer_consumed, mcqe))
11949 		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
11950 	return workposted;
11951 }
11952 
11953 /**
11954  * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
11955  * @phba: Pointer to HBA context object.
11956  * @cqe: Pointer to mailbox completion queue entry.
11957  *
11958  * This routine processes a mailbox completion queue entry; it invokes the
11959  * proper mailbox completion handling or asynchronous event handling routine
11960  * according to the MCQE's async bit.
11961  *
11962  * Return: true if work posted to worker thread, otherwise false.
11963  **/
11964 static bool
11965 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
11966 {
11967 	struct lpfc_mcqe mcqe;
11968 	bool workposted;
11969 
11970 	/* Copy the mailbox MCQE and convert endian order as needed */
11971 	lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
11972 
11973 	/* Invoke the proper event handling routine */
11974 	if (!bf_get(lpfc_trailer_async, &mcqe))
11975 		workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
11976 	else
11977 		workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
11978 	return workposted;
11979 }
11980 
11981 /**
11982  * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
11983  * @phba: Pointer to HBA context object.
11984  * @cq: Pointer to associated CQ
11985  * @wcqe: Pointer to work-queue completion queue entry.
11986  *
11987  * This routine handles an ELS work-queue completion event.
11988  *
11989  * Return: true if work posted to worker thread, otherwise false.
11990  **/
11991 static bool
11992 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11993 			     struct lpfc_wcqe_complete *wcqe)
11994 {
11995 	struct lpfc_iocbq *irspiocbq;
11996 	unsigned long iflags;
11997 	struct lpfc_sli_ring *pring = cq->pring;
11998 	int txq_cnt = 0;
11999 	int txcmplq_cnt = 0;
12000 	int fcp_txcmplq_cnt = 0;
12001 
12002 	/* Get an irspiocbq for later ELS response processing use */
12003 	irspiocbq = lpfc_sli_get_iocbq(phba);
12004 	if (!irspiocbq) {
12005 		if (!list_empty(&pring->txq))
12006 			txq_cnt++;
12007 		if (!list_empty(&pring->txcmplq))
12008 			txcmplq_cnt++;
12009 		if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
12010 			fcp_txcmplq_cnt++;
12011 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12012 			"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
12013 			"fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
12014 			txq_cnt, phba->iocb_cnt,
12015 			fcp_txcmplq_cnt,
12016 			txcmplq_cnt);
12017 		return false;
12018 	}
12019 
12020 	/* Save off the slow-path queue event for work thread to process */
12021 	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
12022 	spin_lock_irqsave(&phba->hbalock, iflags);
12023 	list_add_tail(&irspiocbq->cq_event.list,
12024 		      &phba->sli4_hba.sp_queue_event);
12025 	phba->hba_flag |= HBA_SP_QUEUE_EVT;
12026 	spin_unlock_irqrestore(&phba->hbalock, iflags);
12027 
12028 	return true;
12029 }
12030 
12031 /**
12032  * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
12033  * @phba: Pointer to HBA context object.
12034  * @wcqe: Pointer to work-queue completion queue entry.
12035  *
12036  * This routine handles a slow-path WQ entry consumed event by invoking the
12037  * proper WQ release routine to the slow-path WQ.
12038  **/
12039 static void
12040 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
12041 			     struct lpfc_wcqe_release *wcqe)
12042 {
12043 	/* sanity check on queue memory */
12044 	if (unlikely(!phba->sli4_hba.els_wq))
12045 		return;
12046 	/* Check for the slow-path ELS work queue */
12047 	if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
12048 		lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
12049 				     bf_get(lpfc_wcqe_r_wqe_index, wcqe));
12050 	else
12051 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12052 				"2579 Slow-path wqe consume event carries "
12053 				"miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
12054 				bf_get(lpfc_wcqe_r_wqe_index, wcqe),
12055 				phba->sli4_hba.els_wq->queue_id);
12056 }
12057 
12058 /**
12059  * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
12060  * @phba: Pointer to HBA context object.
12061  * @cq: Pointer to a WQ completion queue.
12062  * @wcqe: Pointer to work-queue completion queue entry.
12063  *
12064  * This routine handles an XRI abort event.
12065  *
12066  * Return: true if work posted to worker thread, otherwise false.
12067  **/
12068 static bool
12069 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
12070 				   struct lpfc_queue *cq,
12071 				   struct sli4_wcqe_xri_aborted *wcqe)
12072 {
12073 	bool workposted = false;
12074 	struct lpfc_cq_event *cq_event;
12075 	unsigned long iflags;
12076 
12077 	/* Allocate a new internal CQ_EVENT entry */
12078 	cq_event = lpfc_sli4_cq_event_alloc(phba);
12079 	if (!cq_event) {
12080 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12081 				"0602 Failed to allocate CQ_EVENT entry\n");
12082 		return false;
12083 	}
12084 
12085 	/* Move the CQE into the proper xri abort event list */
12086 	memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
12087 	switch (cq->subtype) {
12088 	case LPFC_FCP:
12089 		spin_lock_irqsave(&phba->hbalock, iflags);
12090 		list_add_tail(&cq_event->list,
12091 			      &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
12092 		/* Set the fcp xri abort event flag */
12093 		phba->hba_flag |= FCP_XRI_ABORT_EVENT;
12094 		spin_unlock_irqrestore(&phba->hbalock, iflags);
12095 		workposted = true;
12096 		break;
12097 	case LPFC_ELS:
12098 		spin_lock_irqsave(&phba->hbalock, iflags);
12099 		list_add_tail(&cq_event->list,
12100 			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
12101 		/* Set the els xri abort event flag */
12102 		phba->hba_flag |= ELS_XRI_ABORT_EVENT;
12103 		spin_unlock_irqrestore(&phba->hbalock, iflags);
12104 		workposted = true;
12105 		break;
12106 	default:
12107 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12108 				"0603 Invalid work queue CQE subtype (x%x)\n",
12109 				cq->subtype);
12110 		workposted = false;
12111 		break;
12112 	}
12113 	return workposted;
12114 }
12115 
12116 /**
12117  * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
12118  * @phba: Pointer to HBA context object.
12119  * @rcqe: Pointer to receive-queue completion queue entry.
12120  *
12121  * This routine processes a receive-queue completion queue entry.
12122  *
12123  * Return: true if work posted to worker thread, otherwise false.
12124  **/
12125 static bool
12126 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
12127 {
12128 	bool workposted = false;
12129 	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
12130 	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
12131 	struct hbq_dmabuf *dma_buf;
12132 	uint32_t status, rq_id;
12133 	unsigned long iflags;
12134 
12135 	/* sanity check on queue memory */
12136 	if (unlikely(!hrq) || unlikely(!drq))
12137 		return workposted;
12138 
12139 	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
12140 		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
12141 	else
12142 		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
12143 	if (rq_id != hrq->queue_id)
12144 		goto out;
12145 
12146 	status = bf_get(lpfc_rcqe_status, rcqe);
12147 	switch (status) {
12148 	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
12149 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12150 				"2537 Receive Frame Truncated!!\n");
12151 		hrq->RQ_buf_trunc++;
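		/* fall through */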
12152 	case FC_STATUS_RQ_SUCCESS:
12153 		lpfc_sli4_rq_release(hrq, drq);
12154 		spin_lock_irqsave(&phba->hbalock, iflags);
12155 		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
12156 		if (!dma_buf) {
12157 			hrq->RQ_no_buf_found++;
12158 			spin_unlock_irqrestore(&phba->hbalock, iflags);
12159 			goto out;
12160 		}
12161 		hrq->RQ_rcv_buf++;
12162 		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
12163 		/* save off the frame for the worker thread to process */
12164 		list_add_tail(&dma_buf->cq_event.list,
12165 			      &phba->sli4_hba.sp_queue_event);
12166 		/* Frame received */
12167 		phba->hba_flag |= HBA_SP_QUEUE_EVT;
12168 		spin_unlock_irqrestore(&phba->hbalock, iflags);
12169 		workposted = true;
12170 		break;
12171 	case FC_STATUS_INSUFF_BUF_NEED_BUF:
12172 	case FC_STATUS_INSUFF_BUF_FRM_DISC:
12173 		hrq->RQ_no_posted_buf++;
12174 		/* Post more buffers if possible */
12175 		spin_lock_irqsave(&phba->hbalock, iflags);
12176 		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
12177 		spin_unlock_irqrestore(&phba->hbalock, iflags);
12178 		workposted = true;
12179 		break;
12180 	}
12181 out:
12182 	return workposted;
12183 }
12184 
12185 /**
12186  * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
12187  * @phba: Pointer to HBA context object.
12188  * @cq: Pointer to the completion queue.
12189  * @cqe: Pointer to a completion queue entry.
12190  *
12191  * This routine processes a slow-path work-queue or receive-queue completion queue
12192  * entry.
12193  *
12194  * Return: true if work posted to worker thread, otherwise false.
12195  **/
12196 static bool
12197 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12198 			 struct lpfc_cqe *cqe)
12199 {
12200 	struct lpfc_cqe cqevt;
12201 	bool workposted = false;
12202 
12203 	/* Copy the work queue CQE and convert endian order if needed */
12204 	lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
12205 
12206 	/* Check and process for different type of WCQE and dispatch */
12207 	switch (bf_get(lpfc_cqe_code, &cqevt)) {
12208 	case CQE_CODE_COMPL_WQE:
12209 		/* Process the WQ/RQ complete event */
12210 		phba->last_completion_time = jiffies;
12211 		workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
12212 				(struct lpfc_wcqe_complete *)&cqevt);
12213 		break;
12214 	case CQE_CODE_RELEASE_WQE:
12215 		/* Process the WQ release event */
12216 		lpfc_sli4_sp_handle_rel_wcqe(phba,
12217 				(struct lpfc_wcqe_release *)&cqevt);
12218 		break;
12219 	case CQE_CODE_XRI_ABORTED:
12220 		/* Process the WQ XRI abort event */
12221 		phba->last_completion_time = jiffies;
12222 		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
12223 				(struct sli4_wcqe_xri_aborted *)&cqevt);
12224 		break;
12225 	case CQE_CODE_RECEIVE:
12226 	case CQE_CODE_RECEIVE_V1:
12227 		/* Process the RQ event */
12228 		phba->last_completion_time = jiffies;
12229 		workposted = lpfc_sli4_sp_handle_rcqe(phba,
12230 				(struct lpfc_rcqe *)&cqevt);
12231 		break;
12232 	default:
12233 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12234 				"0388 Not a valid WCQE code: x%x\n",
12235 				bf_get(lpfc_cqe_code, &cqevt));
12236 		break;
12237 	}
12238 	return workposted;
12239 }
12240 
12241 /**
12242  * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
12243  * @phba: Pointer to HBA context object.
12244  * @eqe: Pointer to slow-path event queue entry.
12245  *
12246  * This routine processes an event queue entry from the slow-path event queue.
12247  * It checks the MajorCode and MinorCode to determine whether this is a
12248  * completion event on a completion queue; if not, an error is logged and the
12249  * routine returns. Otherwise, it finds the corresponding completion
12250  * queue, processes all the entries on that completion queue, rearms the
12251  * completion queue, and then returns.
12252  *
12253  **/
12254 static void
12255 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
12256 	struct lpfc_queue *speq)
12257 {
12258 	struct lpfc_queue *cq = NULL, *childq;
12259 	struct lpfc_cqe *cqe;
12260 	bool workposted = false;
12261 	int ecount = 0;
12262 	uint16_t cqid;
12263 
12264 	/* Get the reference to the corresponding CQ */
12265 	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
12266 
12267 	list_for_each_entry(childq, &speq->child_list, list) {
12268 		if (childq->queue_id == cqid) {
12269 			cq = childq;
12270 			break;
12271 		}
12272 	}
12273 	if (unlikely(!cq)) {
12274 		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
12275 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12276 					"0365 Slow-path CQ identifier "
12277 					"(%d) does not exist\n", cqid);
12278 		return;
12279 	}
12280 
12281 	/* Process all the entries to the CQ */
12282 	switch (cq->type) {
12283 	case LPFC_MCQ:
12284 		while ((cqe = lpfc_sli4_cq_get(cq))) {
12285 			workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
12286 			if (!(++ecount % cq->entry_repost))
12287 				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
12288 			cq->CQ_mbox++;
12289 		}
12290 		break;
12291 	case LPFC_WCQ:
12292 		while ((cqe = lpfc_sli4_cq_get(cq))) {
12293 			if (cq->subtype == LPFC_FCP)
12294 				workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq,
12295 								       cqe);
12296 			else
12297 				workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
12298 								      cqe);
12299 			if (!(++ecount % cq->entry_repost))
12300 				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
12301 		}
12302 
12303 		/* Track the max number of CQEs processed in 1 EQ */
12304 		if (ecount > cq->CQ_max_cqe)
12305 			cq->CQ_max_cqe = ecount;
12306 		break;
12307 	default:
12308 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12309 				"0370 Invalid completion queue type (%d)\n",
12310 				cq->type);
12311 		return;
12312 	}
12313 
12314 	/* Catch the no cq entry condition, log an error */
12315 	if (unlikely(ecount == 0))
12316 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12317 				"0371 No entry from the CQ: identifier "
12318 				"(x%x), type (%d)\n", cq->queue_id, cq->type);
12319 
12320 	/* In any case, flush and re-arm the CQ */
12321 	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
12322 
12323 	/* wake up worker thread if there is work to be done */
12324 	if (workposted)
12325 		lpfc_worker_wake_up(phba);
12326 }
12327 
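/*
 * Editor's note: every CQ polling loop in this file follows the same
 * doorbell discipline: consumed entries are acknowledged without re-arming
 * (LPFC_QUEUE_NOARM) once every cq->entry_repost entries so the hardware can
 * reuse them, and the CQ is re-armed (LPFC_QUEUE_REARM) exactly once after
 * the loop.  Condensed sketch (illustration only; handle_cqe() stands in for
 * the per-type handler):
 *
 *	while ((cqe = lpfc_sli4_cq_get(cq))) {
 *		workposted |= handle_cqe(phba, cq, cqe);
 *		if (!(++ecount % cq->entry_repost))
 *			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
 *	}
 *	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
 */
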
12328 /**
12329  * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
12330  * @phba: Pointer to HBA context object.
12331  * @cq: Pointer to associated CQ
12332  * @wcqe: Pointer to work-queue completion queue entry.
12333  *
12334  * This routine processes a fast-path work queue completion entry from the fast-path
12335  * event queue for FCP command response completion.
12336  **/
12337 static void
12338 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12339 			     struct lpfc_wcqe_complete *wcqe)
12340 {
12341 	struct lpfc_sli_ring *pring = cq->pring;
12342 	struct lpfc_iocbq *cmdiocbq;
12343 	struct lpfc_iocbq irspiocbq;
12344 	unsigned long iflags;
12345 
12346 	/* Check for response status */
12347 	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
12348 		/* If resource errors reported from HBA, reduce queue
12349 		 * depth of the SCSI device.
12350 		 */
12351 		if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
12352 		     IOSTAT_LOCAL_REJECT)) &&
12353 		    ((wcqe->parameter & IOERR_PARAM_MASK) ==
12354 		     IOERR_NO_RESOURCES))
12355 			phba->lpfc_rampdown_queue_depth(phba);
12356 
12357 		/* Log the error status */
12358 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12359 				"0373 FCP complete error: status=x%x, "
12360 				"hw_status=x%x, total_data_specified=%d, "
12361 				"parameter=x%x, word3=x%x\n",
12362 				bf_get(lpfc_wcqe_c_status, wcqe),
12363 				bf_get(lpfc_wcqe_c_hw_status, wcqe),
12364 				wcqe->total_data_placed, wcqe->parameter,
12365 				wcqe->word3);
12366 	}
12367 
12368 	/* Look up the FCP command IOCB and create pseudo response IOCB */
12369 	spin_lock_irqsave(&pring->ring_lock, iflags);
12370 	pring->stats.iocb_event++;
12371 	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
12372 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
12373 	spin_unlock_irqrestore(&pring->ring_lock, iflags);
12374 	if (unlikely(!cmdiocbq)) {
12375 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12376 				"0374 FCP complete with no corresponding "
12377 				"cmdiocb: iotag (%d)\n",
12378 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
12379 		return;
12380 	}
12381 	if (unlikely(!cmdiocbq->iocb_cmpl)) {
12382 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12383 				"0375 FCP cmdiocb not callback function "
12384 				"iotag: (%d)\n",
12385 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
12386 		return;
12387 	}
12388 
12389 	/* Fake the irspiocb and copy necessary response information */
12390 	lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
12391 
12392 	if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
12393 		spin_lock_irqsave(&phba->hbalock, iflags);
12394 		cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
12395 		spin_unlock_irqrestore(&phba->hbalock, iflags);
12396 	}
12397 
12398 	/* Pass the cmd_iocb and the rsp state to the upper layer */
12399 	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
12400 }
12401 
12402 /**
12403  * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
12404  * @phba: Pointer to HBA context object.
12405  * @cq: Pointer to completion queue.
12406  * @wcqe: Pointer to work-queue completion queue entry.
12407  *
12408  * This routine handles a fast-path WQ entry consumed event by invoking the
12409  * proper WQ release routine for the matching fast-path WQ.
12410  **/
12411 static void
12412 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12413 			     struct lpfc_wcqe_release *wcqe)
12414 {
12415 	struct lpfc_queue *childwq;
12416 	bool wqid_matched = false;
12417 	uint16_t fcp_wqid;
12418 
12419 	/* Check for fast-path FCP work queue release */
12420 	fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
12421 	list_for_each_entry(childwq, &cq->child_list, list) {
12422 		if (childwq->queue_id == fcp_wqid) {
12423 			lpfc_sli4_wq_release(childwq,
12424 					bf_get(lpfc_wcqe_r_wqe_index, wcqe));
12425 			wqid_matched = true;
12426 			break;
12427 		}
12428 	}
12429 	/* Report warning log message if no match found */
12430 	if (wqid_matched != true)
12431 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12432 				"2580 Fast-path wqe consume event carries "
12433 				"miss-matched qid: wcqe-qid=x%x\n", fcp_wqid);
12434 }
12435 
12436 /**
12437  * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
12438  * @cq: Pointer to the completion queue.
12439  * @cqe: Pointer to fast-path completion queue entry.
12440  *
12441  * This routine processes a fast-path work queue completion entry from the fast-path
12442  * event queue for FCP command response completion.
12443  **/
12444 static int
12445 lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12446 			 struct lpfc_cqe *cqe)
12447 {
12448 	struct lpfc_wcqe_release wcqe;
12449 	bool workposted = false;
12450 
12451 	/* Copy the work queue CQE and convert endian order if needed */
12452 	lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
12453 
12454 	/* Check and process for different type of WCQE and dispatch */
12455 	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
12456 	case CQE_CODE_COMPL_WQE:
12457 		cq->CQ_wq++;
12458 		/* Process the WQ complete event */
12459 		phba->last_completion_time = jiffies;
12460 		lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
12461 				(struct lpfc_wcqe_complete *)&wcqe);
12462 		break;
12463 	case CQE_CODE_RELEASE_WQE:
12464 		cq->CQ_release_wqe++;
12465 		/* Process the WQ release event */
12466 		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
12467 				(struct lpfc_wcqe_release *)&wcqe);
12468 		break;
12469 	case CQE_CODE_XRI_ABORTED:
12470 		cq->CQ_xri_aborted++;
12471 		/* Process the WQ XRI abort event */
12472 		phba->last_completion_time = jiffies;
12473 		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
12474 				(struct sli4_wcqe_xri_aborted *)&wcqe);
12475 		break;
12476 	default:
12477 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12478 				"0144 Not a valid WCQE code: x%x\n",
12479 				bf_get(lpfc_wcqe_c_code, &wcqe));
12480 		break;
12481 	}
12482 	return workposted;
12483 }
12484 
12485 /**
12486  * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
12487  * @phba: Pointer to HBA context object.
12488  * @eqe: Pointer to fast-path event queue entry.
12489  *
12490  * This routine processes an event queue entry from the fast-path event queue.
12491  * It checks the MajorCode and MinorCode to determine whether this is a
12492  * completion event on a completion queue; if not, an error is logged and the
12493  * routine returns. Otherwise, it finds the corresponding completion
12494  * queue, processes all the entries on the completion queue, rearms the
12495  * completion queue, and then returns.
12496  **/
12497 static void
12498 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
12499 			uint32_t qidx)
12500 {
12501 	struct lpfc_queue *cq;
12502 	struct lpfc_cqe *cqe;
12503 	bool workposted = false;
12504 	uint16_t cqid;
12505 	int ecount = 0;
12506 
12507 	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
12508 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12509 				"0366 Not a valid completion "
12510 				"event: majorcode=x%x, minorcode=x%x\n",
12511 				bf_get_le32(lpfc_eqe_major_code, eqe),
12512 				bf_get_le32(lpfc_eqe_minor_code, eqe));
12513 		return;
12514 	}
12515 
12516 	/* Get the reference to the corresponding CQ */
12517 	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
12518 
12519 	/* Check if this is a Slow path event */
12520 	if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) {
12521 		lpfc_sli4_sp_handle_eqe(phba, eqe,
12522 			phba->sli4_hba.hba_eq[qidx]);
12523 		return;
12524 	}
12525 
12526 	if (unlikely(!phba->sli4_hba.fcp_cq)) {
12527 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12528 				"3146 Fast-path completion queues "
12529 				"does not exist\n");
12530 		return;
12531 	}
12532 	cq = phba->sli4_hba.fcp_cq[qidx];
12533 	if (unlikely(!cq)) {
12534 		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
12535 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12536 					"0367 Fast-path completion queue "
12537 					"(%d) does not exist\n", qidx);
12538 		return;
12539 	}
12540 
12541 	if (unlikely(cqid != cq->queue_id)) {
12542 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12543 				"0368 Miss-matched fast-path completion "
12544 				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
12545 				cqid, cq->queue_id);
12546 		return;
12547 	}
12548 
12549 	/* Process all the entries to the CQ */
12550 	while ((cqe = lpfc_sli4_cq_get(cq))) {
12551 		workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
12552 		if (!(++ecount % cq->entry_repost))
12553 			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
12554 	}
12555 
12556 	/* Track the max number of CQEs processed in 1 EQ */
12557 	if (ecount > cq->CQ_max_cqe)
12558 		cq->CQ_max_cqe = ecount;
12559 
12560 	/* Catch the no cq entry condition */
12561 	if (unlikely(ecount == 0))
12562 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12563 				"0369 No entry from fast-path completion "
12564 				"queue fcpcqid=%d\n", cq->queue_id);
12565 
12566 	/* In any case, flush and re-arm the CQ */
12567 	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
12568 
12569 	/* wake up worker thread if there is work to be done */
12570 	if (workposted)
12571 		lpfc_worker_wake_up(phba);
12572 }
12573 
12574 static void
12575 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
12576 {
12577 	struct lpfc_eqe *eqe;
12578 
12579 	/* walk all the EQ entries and drop on the floor */
12580 	while ((eqe = lpfc_sli4_eq_get(eq)))
12581 		;
12582 
12583 	/* Clear and re-arm the EQ */
12584 	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
12585 }
12586 
12587 
12588 /**
12589  * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
12590  *			     entry
12591  * @phba: Pointer to HBA context object.
12592  * @eqe: Pointer to fast-path event queue entry.
12593  *
12594  * This routine processes an event queue entry from the Flash Optimized Fabric
12595  * event queue.  It checks the MajorCode and MinorCode to determine whether this
12596  * is a completion event on a completion queue; if not, an error is
12597  * logged and the routine returns. Otherwise, it finds the corresponding
12598  * completion queue, processes all the entries on the completion queue, rearms
12599  * the completion queue, and then returns.
12600  **/
12601 static void
12602 lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
12603 {
12604 	struct lpfc_queue *cq;
12605 	struct lpfc_cqe *cqe;
12606 	bool workposted = false;
12607 	uint16_t cqid;
12608 	int ecount = 0;
12609 
12610 	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
12611 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12612 				"9147 Not a valid completion "
12613 				"event: majorcode=x%x, minorcode=x%x\n",
12614 				bf_get_le32(lpfc_eqe_major_code, eqe),
12615 				bf_get_le32(lpfc_eqe_minor_code, eqe));
12616 		return;
12617 	}
12618 
12619 	/* Get the reference to the corresponding CQ */
12620 	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
12621 
12622 	/* Next check for OAS */
12623 	cq = phba->sli4_hba.oas_cq;
12624 	if (unlikely(!cq)) {
12625 		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
12626 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12627 					"9148 OAS completion queue "
12628 					"does not exist\n");
12629 		return;
12630 	}
12631 
12632 	if (unlikely(cqid != cq->queue_id)) {
12633 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12634 				"9149 Miss-matched fast-path compl "
12635 				"queue id: eqcqid=%d, fcpcqid=%d\n",
12636 				cqid, cq->queue_id);
12637 		return;
12638 	}
12639 
12640 	/* Process all the entries to the OAS CQ */
12641 	while ((cqe = lpfc_sli4_cq_get(cq))) {
12642 		workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
12643 		if (!(++ecount % cq->entry_repost))
12644 			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
12645 	}
12646 
12647 	/* Track the max number of CQEs processed in 1 EQ */
12648 	if (ecount > cq->CQ_max_cqe)
12649 		cq->CQ_max_cqe = ecount;
12650 
12651 	/* Catch the no cq entry condition */
12652 	if (unlikely(ecount == 0))
12653 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12654 				"9153 No entry from fast-path completion "
12655 				"queue fcpcqid=%d\n", cq->queue_id);
12656 
12657 	/* In any case, flush and re-arm the CQ */
12658 	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
12659 
12660 	/* wake up worker thread if there is work to be done */
12661 	if (workposted)
12662 		lpfc_worker_wake_up(phba);
12663 }
12664 
12665 /**
12666  * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
12667  * @irq: Interrupt number.
12668  * @dev_id: The device context pointer.
12669  *
12670  * This function is directly called from the PCI layer as an interrupt
12671  * service routine when device with SLI-4 interface spec is enabled with
12672  * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
12673  * IOCB ring event in the HBA. However, when the device is enabled with either
12674  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
12675  * device-level interrupt handler. When the PCI slot is in error recovery
12676  * or the HBA is undergoing initialization, the interrupt handler will not
12677  * process the interrupt. The Flash Optimized Fabric ring events are handled in
12678  * the interrupt context. This function is called without any lock held.
12679  * It gets the hbalock to access and update SLI data structures. Note that
12680  * the EQ and CQ are mapped one-to-one, so the EQ index is
12681  * equal to the CQ index.
12682  *
12683  * This function returns IRQ_HANDLED when interrupt is handled else it
12684  * returns IRQ_NONE.
12685  **/
12686 irqreturn_t
12687 lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
12688 {
12689 	struct lpfc_hba *phba;
12690 	struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
12691 	struct lpfc_queue *eq;
12692 	struct lpfc_eqe *eqe;
12693 	unsigned long iflag;
12694 	int ecount = 0;
12695 
12696 	/* Get the driver's phba structure from the dev_id */
12697 	fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
12698 	phba = fcp_eq_hdl->phba;
12699 
12700 	if (unlikely(!phba))
12701 		return IRQ_NONE;
12702 
12703 	/* Get to the EQ struct associated with this vector */
12704 	eq = phba->sli4_hba.fof_eq;
12705 	if (unlikely(!eq))
12706 		return IRQ_NONE;
12707 
12708 	/* Check device state for handling interrupt */
12709 	if (unlikely(lpfc_intr_state_check(phba))) {
12710 		eq->EQ_badstate++;
12711 		/* Check again for link_state with lock held */
12712 		spin_lock_irqsave(&phba->hbalock, iflag);
12713 		if (phba->link_state < LPFC_LINK_DOWN)
12714 			/* Flush, clear interrupt, and rearm the EQ */
12715 			lpfc_sli4_eq_flush(phba, eq);
12716 		spin_unlock_irqrestore(&phba->hbalock, iflag);
12717 		return IRQ_NONE;
12718 	}
12719 
12720 	/*
12721 	 * Process all the events on the FCP fast-path EQ
12722 	 */
12723 	while ((eqe = lpfc_sli4_eq_get(eq))) {
12724 		lpfc_sli4_fof_handle_eqe(phba, eqe);
12725 		if (!(++ecount % eq->entry_repost))
12726 			lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM);
12727 		eq->EQ_processed++;
12728 	}
12729 
12730 	/* Track the max number of EQEs processed in 1 intr */
12731 	if (ecount > eq->EQ_max_eqe)
12732 		eq->EQ_max_eqe = ecount;
12733 
12734 
12735 	if (unlikely(ecount == 0)) {
12736 		eq->EQ_no_entry++;
12737 
12738 		if (phba->intr_type == MSIX)
12739 			/* MSI-X: vector is not shared, treat as served even with no EQE */
12740 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12741 					"9145 MSI-X interrupt with no EQE\n");
12742 		else {
12743 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12744 					"9146 ISR interrupt with no EQE\n");
12745 			/* Non MSI-X: line may be shared, let another handler claim it */
12746 			return IRQ_NONE;
12747 		}
12748 	}
12749 	/* Always clear and re-arm the fast-path EQ */
12750 	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
12751 	return IRQ_HANDLED;
12752 }
12753 
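/*
 * Illustrative sketch (not driver code): in MSI-X mode the handler above is
 * typically registered directly against the dedicated FOF vector, with the
 * per-vector lpfc_fcp_eq_hdl used as the dev_id cookie so the handler can
 * recover the phba from it. The actual registration lives in the driver's
 * init path; the vector variable and name string below are assumptions used
 * only for illustration:
 *
 *	rc = request_irq(fof_vector, lpfc_sli4_fof_intr_handler, 0,
 *			 "lpfc-fof", &phba->sli4_hba.fcp_eq_hdl[0]);
 *	if (rc)
 *		return rc;
 */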
12754 /**
12755  * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
12756  * @irq: Interrupt number.
12757  * @dev_id: The device context pointer.
12758  *
12759  * This function is directly called from the PCI layer as an interrupt
12760  * service routine when device with SLI-4 interface spec is enabled with
12761  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
12762  * ring event in the HBA. However, when the device is enabled with either
12763  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
12764  * device-level interrupt handler. When the PCI slot is in error recovery
12765  * or the HBA is undergoing initialization, the interrupt handler will not
12766  * process the interrupt. The SCSI FCP fast-path ring events are handled
12767  * in interrupt context. This function is called without any lock held.
12768  * It gets the hbalock to access and update SLI data structures. Note that
12769  * the FCP EQ and FCP CQ are mapped one-to-one, so the FCP EQ index is
12770  * equal to the FCP CQ index.
12771  *
12772  * The link attention and ELS ring attention events are handled
12773  * by the worker thread. The interrupt handler signals the worker thread
12774  * and returns for these events rather than processing them in interrupt
12775  * context itself.
12776  *
12777  * This function returns IRQ_HANDLED when interrupt is handled else it
12778  * returns IRQ_NONE.
12779  **/
12780 irqreturn_t
12781 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
12782 {
12783 	struct lpfc_hba *phba;
12784 	struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
12785 	struct lpfc_queue *fpeq;
12786 	struct lpfc_eqe *eqe;
12787 	unsigned long iflag;
12788 	int ecount = 0;
12789 	int fcp_eqidx;
12790 
12791 	/* Get the driver's phba structure from the dev_id */
12792 	fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
12793 	phba = fcp_eq_hdl->phba;
12794 	fcp_eqidx = fcp_eq_hdl->idx;
12795 
12796 	if (unlikely(!phba))
12797 		return IRQ_NONE;
12798 	if (unlikely(!phba->sli4_hba.hba_eq))
12799 		return IRQ_NONE;
12800 
12801 	/* Get to the EQ struct associated with this vector */
12802 	fpeq = phba->sli4_hba.hba_eq[fcp_eqidx];
12803 	if (unlikely(!fpeq))
12804 		return IRQ_NONE;
12805 
12806 	if (lpfc_fcp_look_ahead) {
12807 		if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use))
12808 			lpfc_sli4_eq_clr_intr(fpeq);
12809 		else {
12810 			atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
12811 			return IRQ_NONE;
12812 		}
12813 	}
12814 
12815 	/* Check device state for handling interrupt */
12816 	if (unlikely(lpfc_intr_state_check(phba))) {
12817 		fpeq->EQ_badstate++;
12818 		/* Check again for link_state with lock held */
12819 		spin_lock_irqsave(&phba->hbalock, iflag);
12820 		if (phba->link_state < LPFC_LINK_DOWN)
12821 			/* Flush, clear interrupt, and rearm the EQ */
12822 			lpfc_sli4_eq_flush(phba, fpeq);
12823 		spin_unlock_irqrestore(&phba->hbalock, iflag);
12824 		if (lpfc_fcp_look_ahead)
12825 			atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
12826 		return IRQ_NONE;
12827 	}
12828 
12829 	/*
12830 	 * Process all the events on the FCP fast-path EQ
12831 	 */
12832 	while ((eqe = lpfc_sli4_eq_get(fpeq))) {
12833 		if (eqe == NULL)
12834 			break;
12835 
12836 		lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx);
12837 		if (!(++ecount % fpeq->entry_repost))
12838 			lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
12839 		fpeq->EQ_processed++;
12840 	}
12841 
12842 	/* Track the max number of EQEs processed in 1 intr */
12843 	if (ecount > fpeq->EQ_max_eqe)
12844 		fpeq->EQ_max_eqe = ecount;
12845 
12846 	/* Always clear and re-arm the fast-path EQ */
12847 	lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
12848 
12849 	if (unlikely(ecount == 0)) {
12850 		fpeq->EQ_no_entry++;
12851 
12852 		if (lpfc_fcp_look_ahead) {
12853 			atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
12854 			return IRQ_NONE;
12855 		}
12856 
12857 		if (phba->intr_type == MSIX)
12858 			/* MSI-X: vector is not shared, treat as served even with no EQE */
12859 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12860 					"0358 MSI-X interrupt with no EQE\n");
12861 		else
12862 			/* Non MSI-X: line may be shared, let another handler claim it */
12863 			return IRQ_NONE;
12864 	}
12865 
12866 	if (lpfc_fcp_look_ahead)
12867 		atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
12868 	return IRQ_HANDLED;
12869 } /* lpfc_sli4_fp_intr_handler */
12870 
12871 /**
12872  * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
12873  * @irq: Interrupt number.
12874  * @dev_id: The device context pointer.
12875  *
12876  * This function is the device-level interrupt handler to device with SLI-4
12877  * interface spec, called from the PCI layer when either MSI or Pin-IRQ
12878  * interrupt mode is enabled and there is an event in the HBA which requires
12879  * driver attention. This function invokes the slow-path interrupt attention
12880  * handling function and fast-path interrupt attention handling function in
12881  * turn to process the relevant HBA attention events. This function is called
12882  * without any lock held. It gets the hbalock to access and update SLI data
12883  * structures.
12884  *
12885  * This function returns IRQ_HANDLED when interrupt is handled, else it
12886  * returns IRQ_NONE.
12887  **/
12888 irqreturn_t
12889 lpfc_sli4_intr_handler(int irq, void *dev_id)
12890 {
12891 	struct lpfc_hba  *phba;
12892 	irqreturn_t hba_irq_rc;
12893 	bool hba_handled = false;
12894 	int fcp_eqidx;
12895 
12896 	/* Get the driver's phba structure from the dev_id */
12897 	phba = (struct lpfc_hba *)dev_id;
12898 
12899 	if (unlikely(!phba))
12900 		return IRQ_NONE;
12901 
12902 	/*
12903 	 * Invoke fast-path host attention interrupt handling as appropriate.
12904 	 */
12905 	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
12906 		hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
12907 					&phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
12908 		if (hba_irq_rc == IRQ_HANDLED)
12909 			hba_handled |= true;
12910 	}
12911 
12912 	if (phba->cfg_fof) {
12913 		hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
12914 					&phba->sli4_hba.fcp_eq_hdl[0]);
12915 		if (hba_irq_rc == IRQ_HANDLED)
12916 			hba_handled |= true;
12917 	}
12918 
12919 	return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
12920 } /* lpfc_sli4_intr_handler */
12921 
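/*
 * Illustrative sketch (not driver code): with MSI or INTx, the single
 * device-level vector is registered against lpfc_sli4_intr_handler with the
 * phba itself as dev_id, and the handler above then polls every fast-path EQ
 * (plus the FOF EQ when cfg_fof is set) on each invocation. A minimal sketch
 * of such a registration, assuming a shared interrupt line; the actual setup
 * is done in the driver's init path:
 *
 *	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
 *			 IRQF_SHARED, "lpfc", phba);
 *	if (rc)
 *		return rc;
 */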
12922 /**
12923  * lpfc_sli4_queue_free - free a queue structure and associated memory
12924  * @queue: The queue structure to free.
12925  *
12926  * This function frees a queue structure and the DMAable memory used for
12927  * the host resident queue. This function must be called after destroying the
12928  * queue on the HBA.
12929  **/
12930 void
12931 lpfc_sli4_queue_free(struct lpfc_queue *queue)
12932 {
12933 	struct lpfc_dmabuf *dmabuf;
12934 
12935 	if (!queue)
12936 		return;
12937 
12938 	while (!list_empty(&queue->page_list)) {
12939 		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
12940 				 list);
12941 		dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
12942 				  dmabuf->virt, dmabuf->phys);
12943 		kfree(dmabuf);
12944 	}
12945 	kfree(queue);
12946 	return;
12947 }
12948 
12949 /**
12950  * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
12951  * @phba: The HBA that this queue is being created on.
12952  * @entry_size: The size of each queue entry for this queue.
12953  * @entry_count: The number of entries that this queue will handle.
12954  *
12955  * This function allocates a queue structure and the DMAable memory used for
12956  * the host resident queue. This function must be called before creating the
12957  * queue on the HBA.
12958  **/
12959 struct lpfc_queue *
12960 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
12961 		      uint32_t entry_count)
12962 {
12963 	struct lpfc_queue *queue;
12964 	struct lpfc_dmabuf *dmabuf;
12965 	int x, total_qe_count;
12966 	void *dma_pointer;
12967 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12968 
12969 	if (!phba->sli4_hba.pc_sli4_params.supported)
12970 		hw_page_size = SLI4_PAGE_SIZE;
12971 
12972 	queue = kzalloc(sizeof(struct lpfc_queue) +
12973 			(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
12974 	if (!queue)
12975 		return NULL;
12976 	queue->page_count = (ALIGN(entry_size * entry_count,
12977 			hw_page_size))/hw_page_size;
12978 	INIT_LIST_HEAD(&queue->list);
12979 	INIT_LIST_HEAD(&queue->page_list);
12980 	INIT_LIST_HEAD(&queue->child_list);
12981 	for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
12982 		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
12983 		if (!dmabuf)
12984 			goto out_fail;
12985 		dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
12986 						   hw_page_size, &dmabuf->phys,
12987 						   GFP_KERNEL);
12988 		if (!dmabuf->virt) {
12989 			kfree(dmabuf);
12990 			goto out_fail;
12991 		}
12992 		dmabuf->buffer_tag = x;
12993 		list_add_tail(&dmabuf->list, &queue->page_list);
12994 		/* initialize queue's entry array */
12995 		dma_pointer = dmabuf->virt;
12996 		for (; total_qe_count < entry_count &&
12997 		     dma_pointer < (hw_page_size + dmabuf->virt);
12998 		     total_qe_count++, dma_pointer += entry_size) {
12999 			queue->qe[total_qe_count].address = dma_pointer;
13000 		}
13001 	}
13002 	queue->entry_size = entry_size;
13003 	queue->entry_count = entry_count;
13004 
13005 	/*
13006 	 * entry_repost is calculated based on the number of entries in the
13007 	 * queue. This works out except for RQs. If buffers are NOT initially
13008 	 * posted for every RQE, entry_repost should be adjusted accordingly.
13009 	 */
13010 	queue->entry_repost = (entry_count >> 3);
13011 	if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
13012 		queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
13013 	queue->phba = phba;
13014 
13015 	return queue;
13016 out_fail:
13017 	lpfc_sli4_queue_free(queue);
13018 	return NULL;
13019 }
13020 
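/*
 * Illustrative sketch (not driver code): a queue allocated here is only host
 * memory; it still has to be created on the port, and on teardown it must be
 * destroyed on the port before being freed. A minimal EQ lifecycle under
 * those assumptions (eqe_size and eqe_count below are placeholders, not
 * values taken from the driver):
 *
 *	struct lpfc_queue *eq;
 *	int rc;
 *
 *	eq = lpfc_sli4_queue_alloc(phba, eqe_size, eqe_count);
 *	if (!eq)
 *		return -ENOMEM;
 *	rc = lpfc_eq_create(phba, eq, phba->cfg_fcp_imax);
 *	if (rc) {
 *		lpfc_sli4_queue_free(eq);
 *		return rc;
 *	}
 *	...
 *	lpfc_eq_destroy(phba, eq);
 *	lpfc_sli4_queue_free(eq);
 */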
13021 /**
13022  * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
13023  * @phba: HBA structure that indicates port to create a queue on.
13024  * @pci_barset: PCI BAR set flag.
13025  *
13026  * This function returns the host memory address previously iomapped for the
13027  * specified PCI BAR set, if any. The returned address can be NULL if the
13028  * BAR set is not recognized or has not been mapped.
13029  */
13030 static void __iomem *
13031 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
13032 {
13033 	if (!phba->pcidev)
13034 		return NULL;
13035 
13036 	switch (pci_barset) {
13037 	case WQ_PCI_BAR_0_AND_1:
13038 		return phba->pci_bar0_memmap_p;
13039 	case WQ_PCI_BAR_2_AND_3:
13040 		return phba->pci_bar2_memmap_p;
13041 	case WQ_PCI_BAR_4_AND_5:
13042 		return phba->pci_bar4_memmap_p;
13043 	default:
13044 		break;
13045 	}
13046 	return NULL;
13047 }
13048 
13049 /**
13050  * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs
13051  * @phba: HBA structure that indicates port to create a queue on.
13052  * @startq: The starting FCP EQ to modify
13053  *
13054  * This function sends a MODIFY_EQ_DELAY mailbox command to the HBA.
13055  *
13056  * The @phba struct is used to send the mailbox command to the HBA. The @startq
13057  * is used to get the starting FCP EQ to change.
13058  * This function issues the mailbox command synchronously and waits for it
13059  * to complete before continuing.
13060  *
13061  * On success this function will return a zero. If unable to allocate enough
13062  * memory this function will return -ENOMEM. If the mailbox command
13063  * fails this function will return -ENXIO.
13064  **/
13065 int
13066 lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint32_t startq)
13067 {
13068 	struct lpfc_mbx_modify_eq_delay *eq_delay;
13069 	LPFC_MBOXQ_t *mbox;
13070 	struct lpfc_queue *eq;
13071 	int cnt, rc, length, status = 0;
13072 	uint32_t shdr_status, shdr_add_status;
13073 	uint32_t result;
13074 	int fcp_eqidx;
13075 	union lpfc_sli4_cfg_shdr *shdr;
13076 	uint16_t dmult;
13077 
13078 	if (startq >= phba->cfg_fcp_io_channel)
13079 		return 0;
13080 
13081 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13082 	if (!mbox)
13083 		return -ENOMEM;
13084 	length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
13085 		  sizeof(struct lpfc_sli4_cfg_mhdr));
13086 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13087 			 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
13088 			 length, LPFC_SLI4_MBX_EMBED);
13089 	eq_delay = &mbox->u.mqe.un.eq_delay;
13090 
13091 	/* Calculate delay multiplier from maximum interrupts per second */
13092 	result = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel;
13093 	if (result > LPFC_DMULT_CONST)
13094 		dmult = 0;
13095 	else
13096 		dmult = LPFC_DMULT_CONST/result - 1;
13097 
13098 	cnt = 0;
13099 	for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel;
13100 	    fcp_eqidx++) {
13101 		eq = phba->sli4_hba.hba_eq[fcp_eqidx];
13102 		if (!eq)
13103 			continue;
13104 		eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
13105 		eq_delay->u.request.eq[cnt].phase = 0;
13106 		eq_delay->u.request.eq[cnt].delay_multi = dmult;
13107 		cnt++;
13108 		if (cnt >= LPFC_MAX_EQ_DELAY)
13109 			break;
13110 	}
13111 	eq_delay->u.request.num_eq = cnt;
13112 
13113 	mbox->vport = phba->pport;
13114 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13115 	mbox->context1 = NULL;
13116 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13117 	shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
13118 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13119 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13120 	if (shdr_status || shdr_add_status || rc) {
13121 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13122 				"2512 MODIFY_EQ_DELAY mailbox failed with "
13123 				"status x%x add_status x%x, mbx status x%x\n",
13124 				shdr_status, shdr_add_status, rc);
13125 		status = -ENXIO;
13126 	}
13127 	mempool_free(mbox, phba->mbox_mem_pool);
13128 	return status;
13129 }
13130 
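/*
 * Worked example (values are illustrative only): cfg_fcp_imax is the total
 * interrupt-per-second budget across all FCP io channels, so each EQ gets
 * result = cfg_fcp_imax / cfg_fcp_io_channel interrupts per second. The delay
 * multiplier programmed into each EQ is then
 *
 *	dmult = LPFC_DMULT_CONST / result - 1
 *
 * and is clamped to 0 when result exceeds LPFC_DMULT_CONST. For instance,
 * with cfg_fcp_imax = 40000 and 4 io channels, result is 10000 and dmult is
 * LPFC_DMULT_CONST / 10000 - 1; a larger dmult corresponds to a longer
 * interrupt coalescing delay and therefore fewer interrupts per second.
 */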
13131 /**
13132  * lpfc_eq_create - Create an Event Queue on the HBA
13133  * @phba: HBA structure that indicates port to create a queue on.
13134  * @eq: The queue structure to use to create the event queue.
13135  * @imax: The maximum interrupts per second limit.
13136  *
13137  * This function creates an event queue, as detailed in @eq, on a port,
13138  * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
13139  *
13140  * The @phba struct is used to send the mailbox command to the HBA. The @eq
13141  * struct is used to get the entry count and entry size that are necessary to
13142  * determine the number of pages to allocate and use for this queue. This
13143  * function will send the EQ_CREATE mailbox command to the HBA to set up the
13144  * event queue. This function issues the mailbox command synchronously and
13145  * waits for it to complete before continuing.
13146  *
13147  * On success this function will return a zero. If unable to allocate enough
13148  * memory this function will return -ENOMEM. If the queue create mailbox command
13149  * fails this function will return -ENXIO.
13150  **/
13151 int
13152 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
13153 {
13154 	struct lpfc_mbx_eq_create *eq_create;
13155 	LPFC_MBOXQ_t *mbox;
13156 	int rc, length, status = 0;
13157 	struct lpfc_dmabuf *dmabuf;
13158 	uint32_t shdr_status, shdr_add_status;
13159 	union lpfc_sli4_cfg_shdr *shdr;
13160 	uint16_t dmult;
13161 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13162 
13163 	/* sanity check on queue memory */
13164 	if (!eq)
13165 		return -ENODEV;
13166 	if (!phba->sli4_hba.pc_sli4_params.supported)
13167 		hw_page_size = SLI4_PAGE_SIZE;
13168 
13169 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13170 	if (!mbox)
13171 		return -ENOMEM;
13172 	length = (sizeof(struct lpfc_mbx_eq_create) -
13173 		  sizeof(struct lpfc_sli4_cfg_mhdr));
13174 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13175 			 LPFC_MBOX_OPCODE_EQ_CREATE,
13176 			 length, LPFC_SLI4_MBX_EMBED);
13177 	eq_create = &mbox->u.mqe.un.eq_create;
13178 	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
13179 	       eq->page_count);
13180 	bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
13181 	       LPFC_EQE_SIZE);
13182 	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
13183 	/* don't setup delay multiplier using EQ_CREATE */
13184 	dmult = 0;
13185 	bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
13186 	       dmult);
13187 	switch (eq->entry_count) {
13188 	default:
13189 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13190 				"0360 Unsupported EQ count. (%d)\n",
13191 				eq->entry_count);
13192 		if (eq->entry_count < 256) {
13193 			mempool_free(mbox, phba->mbox_mem_pool);
			return -EINVAL;
		}
13194 		/* otherwise default to smallest count (drop through) */
13195 	case 256:
13196 		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13197 		       LPFC_EQ_CNT_256);
13198 		break;
13199 	case 512:
13200 		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13201 		       LPFC_EQ_CNT_512);
13202 		break;
13203 	case 1024:
13204 		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13205 		       LPFC_EQ_CNT_1024);
13206 		break;
13207 	case 2048:
13208 		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13209 		       LPFC_EQ_CNT_2048);
13210 		break;
13211 	case 4096:
13212 		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13213 		       LPFC_EQ_CNT_4096);
13214 		break;
13215 	}
13216 	list_for_each_entry(dmabuf, &eq->page_list, list) {
13217 		memset(dmabuf->virt, 0, hw_page_size);
13218 		eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
13219 					putPaddrLow(dmabuf->phys);
13220 		eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
13221 					putPaddrHigh(dmabuf->phys);
13222 	}
13223 	mbox->vport = phba->pport;
13224 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13225 	mbox->context1 = NULL;
13226 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13227 	shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
13228 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13229 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13230 	if (shdr_status || shdr_add_status || rc) {
13231 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13232 				"2500 EQ_CREATE mailbox failed with "
13233 				"status x%x add_status x%x, mbx status x%x\n",
13234 				shdr_status, shdr_add_status, rc);
13235 		status = -ENXIO;
13236 	}
13237 	eq->type = LPFC_EQ;
13238 	eq->subtype = LPFC_NONE;
13239 	eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
13240 	if (eq->queue_id == 0xFFFF)
13241 		status = -ENXIO;
13242 	eq->host_index = 0;
13243 	eq->hba_index = 0;
13244 
13245 	mempool_free(mbox, phba->mbox_mem_pool);
13246 	return status;
13247 }
13248 
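/*
 * Illustrative sketch (not driver code): every queue create/destroy mailbox
 * in this file performs the same completion check, where the SLI-4 config
 * subheader carries the real status even when lpfc_sli_issue_mbox() itself
 * reports success. A hedged helper capturing that shape might look like the
 * following; the helper name is an assumption, and the driver open-codes
 * this check in each routine instead:
 *
 *	static int lpfc_sli4_chk_cfg_status(union lpfc_sli4_cfg_shdr *shdr,
 *					    int mbx_rc)
 *	{
 *		uint32_t status, add_status;
 *
 *		status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
 *		add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
 *		return (status || add_status || mbx_rc) ? -ENXIO : 0;
 *	}
 */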
13249 /**
13250  * lpfc_cq_create - Create a Completion Queue on the HBA
13251  * @phba: HBA structure that indicates port to create a queue on.
13252  * @cq: The queue structure to use to create the completion queue.
13253  * @eq: The event queue to bind this completion queue to.
13254  *
13255  * This function creates a completion queue, as detailed in @cq, on a port,
13256  * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
13257  *
13258  * The @phba struct is used to send the mailbox command to the HBA. The @cq
13259  * struct is used to get the entry count and entry size that are necessary to
13260  * determine the number of pages to allocate and use for this queue. The @eq
13261  * is used to indicate which event queue to bind this completion queue to. This
13262  * function will send the CQ_CREATE mailbox command to the HBA to set up the
13263  * completion queue. This function issues the mailbox command synchronously
13264  * and waits for it to complete before continuing.
13265  *
13266  * On success this function will return a zero. If unable to allocate enough
13267  * memory this function will return -ENOMEM. If the queue create mailbox command
13268  * fails this function will return -ENXIO.
13269  **/
13270 int
13271 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
13272 	       struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
13273 {
13274 	struct lpfc_mbx_cq_create *cq_create;
13275 	struct lpfc_dmabuf *dmabuf;
13276 	LPFC_MBOXQ_t *mbox;
13277 	int rc, length, status = 0;
13278 	uint32_t shdr_status, shdr_add_status;
13279 	union lpfc_sli4_cfg_shdr *shdr;
13280 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13281 
13282 	/* sanity check on queue memory */
13283 	if (!cq || !eq)
13284 		return -ENODEV;
13285 	if (!phba->sli4_hba.pc_sli4_params.supported)
13286 		hw_page_size = SLI4_PAGE_SIZE;
13287 
13288 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13289 	if (!mbox)
13290 		return -ENOMEM;
13291 	length = (sizeof(struct lpfc_mbx_cq_create) -
13292 		  sizeof(struct lpfc_sli4_cfg_mhdr));
13293 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13294 			 LPFC_MBOX_OPCODE_CQ_CREATE,
13295 			 length, LPFC_SLI4_MBX_EMBED);
13296 	cq_create = &mbox->u.mqe.un.cq_create;
13297 	shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
13298 	bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
13299 		    cq->page_count);
13300 	bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
13301 	bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
13302 	bf_set(lpfc_mbox_hdr_version, &shdr->request,
13303 	       phba->sli4_hba.pc_sli4_params.cqv);
13304 	if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
13305 		/* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
13306 		bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
13307 		bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
13308 		       eq->queue_id);
13309 	} else {
13310 		bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
13311 		       eq->queue_id);
13312 	}
13313 	switch (cq->entry_count) {
13314 	default:
13315 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13316 				"0361 Unsupported CQ count. (%d)\n",
13317 				cq->entry_count);
13318 		if (cq->entry_count < 256) {
13319 			status = -EINVAL;
13320 			goto out;
13321 		}
13322 		/* otherwise default to smallest count (drop through) */
13323 	case 256:
13324 		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
13325 		       LPFC_CQ_CNT_256);
13326 		break;
13327 	case 512:
13328 		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
13329 		       LPFC_CQ_CNT_512);
13330 		break;
13331 	case 1024:
13332 		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
13333 		       LPFC_CQ_CNT_1024);
13334 		break;
13335 	}
13336 	list_for_each_entry(dmabuf, &cq->page_list, list) {
13337 		memset(dmabuf->virt, 0, hw_page_size);
13338 		cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
13339 					putPaddrLow(dmabuf->phys);
13340 		cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
13341 					putPaddrHigh(dmabuf->phys);
13342 	}
13343 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13344 
13345 	/* The IOCTL status is embedded in the mailbox subheader. */
13346 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13347 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13348 	if (shdr_status || shdr_add_status || rc) {
13349 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13350 				"2501 CQ_CREATE mailbox failed with "
13351 				"status x%x add_status x%x, mbx status x%x\n",
13352 				shdr_status, shdr_add_status, rc);
13353 		status = -ENXIO;
13354 		goto out;
13355 	}
13356 	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
13357 	if (cq->queue_id == 0xFFFF) {
13358 		status = -ENXIO;
13359 		goto out;
13360 	}
13361 	/* link the cq onto the parent eq child list */
13362 	list_add_tail(&cq->list, &eq->child_list);
13363 	/* Set up completion queue's type and subtype */
13364 	cq->type = type;
13365 	cq->subtype = subtype;
13366 	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
13367 	cq->assoc_qid = eq->queue_id;
13368 	cq->host_index = 0;
13369 	cq->hba_index = 0;
13370 
13371 out:
13372 	mempool_free(mbox, phba->mbox_mem_pool);
13373 	return status;
13374 }
13375 
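/*
 * Illustrative sketch (not driver code): completion queues hang off an event
 * queue and are in turn the parents of work/receive queues, so creation runs
 * in EQ -> CQ -> WQ order. A minimal sketch for one FCP channel; the type and
 * subtype constants below are placeholder assumptions, not values taken from
 * this file:
 *
 *	rc = lpfc_eq_create(phba, eq, phba->cfg_fcp_imax);
 *	if (!rc)
 *		rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP);
 *	if (!rc)
 *		rc = lpfc_wq_create(phba, wq, cq, LPFC_FCP);
 *
 * On success each child queue is linked onto its parent's child_list, which
 * keeps the parent/child relationship visible to the rest of the driver.
 */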
13376 /**
13377  * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
13378  * @phba: HBA structure that indicates port to create a queue on.
13379  * @mq: The queue structure to use to create the mailbox queue.
13380  * @mbox: An allocated pointer to type LPFC_MBOXQ_t
13381  * @cq: The completion queue to associate with this mq.
13382  *
13383  * This function provides fallback (fb) functionality when the
13384  * mq_create_ext fails on older FW generations.  Its purpose is identical
13385  * to mq_create_ext otherwise.
13386  *
13387  * This routine cannot fail as all attributes were previously accessed and
13388  * initialized in mq_create_ext.
13389  **/
13390 static void
13391 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
13392 		       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
13393 {
13394 	struct lpfc_mbx_mq_create *mq_create;
13395 	struct lpfc_dmabuf *dmabuf;
13396 	int length;
13397 
13398 	length = (sizeof(struct lpfc_mbx_mq_create) -
13399 		  sizeof(struct lpfc_sli4_cfg_mhdr));
13400 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13401 			 LPFC_MBOX_OPCODE_MQ_CREATE,
13402 			 length, LPFC_SLI4_MBX_EMBED);
13403 	mq_create = &mbox->u.mqe.un.mq_create;
13404 	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
13405 	       mq->page_count);
13406 	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
13407 	       cq->queue_id);
13408 	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
13409 	switch (mq->entry_count) {
13410 	case 16:
13411 		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
13412 		       LPFC_MQ_RING_SIZE_16);
13413 		break;
13414 	case 32:
13415 		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
13416 		       LPFC_MQ_RING_SIZE_32);
13417 		break;
13418 	case 64:
13419 		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
13420 		       LPFC_MQ_RING_SIZE_64);
13421 		break;
13422 	case 128:
13423 		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
13424 		       LPFC_MQ_RING_SIZE_128);
13425 		break;
13426 	}
13427 	list_for_each_entry(dmabuf, &mq->page_list, list) {
13428 		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
13429 			putPaddrLow(dmabuf->phys);
13430 		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
13431 			putPaddrHigh(dmabuf->phys);
13432 	}
13433 }
13434 
13435 /**
13436  * lpfc_mq_create - Create a mailbox Queue on the HBA
13437  * @phba: HBA structure that indicates port to create a queue on.
13438  * @mq: The queue structure to use to create the mailbox queue.
13439  * @cq: The completion queue to associate with this mq.
13440  * @subtype: The queue's subtype.
13441  *
13442  * This function creates a mailbox queue, as detailed in @mq, on a port,
13443  * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
13444  *
13445  * The @phba struct is used to send the mailbox command to the HBA. The @mq
13446  * struct is used to get the entry count and entry size that are necessary to
13447  * determine the number of pages to allocate and use for this queue. This
13448  * function will send the MQ_CREATE mailbox command to the HBA to set up the
13449  * mailbox queue. This function issues the mailbox command synchronously and
13450  * waits for it to complete before continuing.
13451  *
13452  * On success this function will return a zero. If unable to allocate enough
13453  * memory this function will return -ENOMEM. If the queue create mailbox command
13454  * fails this function will return -ENXIO.
13455  **/
13456 int32_t
13457 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
13458 	       struct lpfc_queue *cq, uint32_t subtype)
13459 {
13460 	struct lpfc_mbx_mq_create *mq_create;
13461 	struct lpfc_mbx_mq_create_ext *mq_create_ext;
13462 	struct lpfc_dmabuf *dmabuf;
13463 	LPFC_MBOXQ_t *mbox;
13464 	int rc, length, status = 0;
13465 	uint32_t shdr_status, shdr_add_status;
13466 	union lpfc_sli4_cfg_shdr *shdr;
13467 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13468 
13469 	/* sanity check on queue memory */
13470 	if (!mq || !cq)
13471 		return -ENODEV;
13472 	if (!phba->sli4_hba.pc_sli4_params.supported)
13473 		hw_page_size = SLI4_PAGE_SIZE;
13474 
13475 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13476 	if (!mbox)
13477 		return -ENOMEM;
13478 	length = (sizeof(struct lpfc_mbx_mq_create_ext) -
13479 		  sizeof(struct lpfc_sli4_cfg_mhdr));
13480 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13481 			 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
13482 			 length, LPFC_SLI4_MBX_EMBED);
13483 
13484 	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
13485 	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
13486 	bf_set(lpfc_mbx_mq_create_ext_num_pages,
13487 	       &mq_create_ext->u.request, mq->page_count);
13488 	bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
13489 	       &mq_create_ext->u.request, 1);
13490 	bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
13491 	       &mq_create_ext->u.request, 1);
13492 	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
13493 	       &mq_create_ext->u.request, 1);
13494 	bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
13495 	       &mq_create_ext->u.request, 1);
13496 	bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
13497 	       &mq_create_ext->u.request, 1);
13498 	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
13499 	bf_set(lpfc_mbox_hdr_version, &shdr->request,
13500 	       phba->sli4_hba.pc_sli4_params.mqv);
13501 	if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
13502 		bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
13503 		       cq->queue_id);
13504 	else
13505 		bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
13506 		       cq->queue_id);
13507 	switch (mq->entry_count) {
13508 	default:
13509 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13510 				"0362 Unsupported MQ count. (%d)\n",
13511 				mq->entry_count);
13512 		if (mq->entry_count < 16) {
13513 			status = -EINVAL;
13514 			goto out;
13515 		}
13516 		/* otherwise default to smallest count (drop through) */
13517 	case 16:
13518 		bf_set(lpfc_mq_context_ring_size,
13519 		       &mq_create_ext->u.request.context,
13520 		       LPFC_MQ_RING_SIZE_16);
13521 		break;
13522 	case 32:
13523 		bf_set(lpfc_mq_context_ring_size,
13524 		       &mq_create_ext->u.request.context,
13525 		       LPFC_MQ_RING_SIZE_32);
13526 		break;
13527 	case 64:
13528 		bf_set(lpfc_mq_context_ring_size,
13529 		       &mq_create_ext->u.request.context,
13530 		       LPFC_MQ_RING_SIZE_64);
13531 		break;
13532 	case 128:
13533 		bf_set(lpfc_mq_context_ring_size,
13534 		       &mq_create_ext->u.request.context,
13535 		       LPFC_MQ_RING_SIZE_128);
13536 		break;
13537 	}
13538 	list_for_each_entry(dmabuf, &mq->page_list, list) {
13539 		memset(dmabuf->virt, 0, hw_page_size);
13540 		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
13541 					putPaddrLow(dmabuf->phys);
13542 		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
13543 					putPaddrHigh(dmabuf->phys);
13544 	}
13545 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13546 	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
13547 			      &mq_create_ext->u.response);
13548 	if (rc != MBX_SUCCESS) {
13549 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13550 				"2795 MQ_CREATE_EXT failed with "
13551 				"status x%x. Failback to MQ_CREATE.\n",
13552 				rc);
13553 		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
13554 		mq_create = &mbox->u.mqe.un.mq_create;
13555 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13556 		shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
13557 		mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
13558 				      &mq_create->u.response);
13559 	}
13560 
13561 	/* The IOCTL status is embedded in the mailbox subheader. */
13562 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13563 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13564 	if (shdr_status || shdr_add_status || rc) {
13565 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13566 				"2502 MQ_CREATE mailbox failed with "
13567 				"status x%x add_status x%x, mbx status x%x\n",
13568 				shdr_status, shdr_add_status, rc);
13569 		status = -ENXIO;
13570 		goto out;
13571 	}
13572 	if (mq->queue_id == 0xFFFF) {
13573 		status = -ENXIO;
13574 		goto out;
13575 	}
13576 	mq->type = LPFC_MQ;
13577 	mq->assoc_qid = cq->queue_id;
13578 	mq->subtype = subtype;
13579 	mq->host_index = 0;
13580 	mq->hba_index = 0;
13581 
13582 	/* link the mq onto the parent cq child list */
13583 	list_add_tail(&mq->list, &cq->child_list);
13584 out:
13585 	mempool_free(mbox, phba->mbox_mem_pool);
13586 	return status;
13587 }
13588 
13589 /**
13590  * lpfc_wq_create - Create a Work Queue on the HBA
13591  * @phba: HBA structure that indicates port to create a queue on.
13592  * @wq: The queue structure to use to create the work queue.
13593  * @cq: The completion queue to bind this work queue to.
13594  * @subtype: The subtype of the work queue indicating its functionality.
13595  *
13596  * This function creates a work queue, as detailed in @wq, on a port, described
13597  * by @phba by sending a WQ_CREATE mailbox command to the HBA.
13598  *
13599  * The @phba struct is used to send the mailbox command to the HBA. The @wq
13600  * struct is used to get the entry count and entry size that are necessary to
13601  * determine the number of pages to allocate and use for this queue. The @cq
13602  * is used to indicate which completion queue to bind this work queue to. This
13603  * function will send the WQ_CREATE mailbox command to the HBA to set up the
13604  * work queue. This function issues the mailbox command synchronously and
13605  * waits for it to complete before continuing.
13606  *
13607  * On success this function will return a zero. If unable to allocate enough
13608  * memory this function will return -ENOMEM. If the queue create mailbox command
13609  * fails this function will return -ENXIO.
13610  **/
13611 int
13612 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
13613 	       struct lpfc_queue *cq, uint32_t subtype)
13614 {
13615 	struct lpfc_mbx_wq_create *wq_create;
13616 	struct lpfc_dmabuf *dmabuf;
13617 	LPFC_MBOXQ_t *mbox;
13618 	int rc, length, status = 0;
13619 	uint32_t shdr_status, shdr_add_status;
13620 	union lpfc_sli4_cfg_shdr *shdr;
13621 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13622 	struct dma_address *page;
13623 	void __iomem *bar_memmap_p;
13624 	uint32_t db_offset;
13625 	uint16_t pci_barset;
13626 
13627 	/* sanity check on queue memory */
13628 	if (!wq || !cq)
13629 		return -ENODEV;
13630 	if (!phba->sli4_hba.pc_sli4_params.supported)
13631 		hw_page_size = SLI4_PAGE_SIZE;
13632 
13633 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13634 	if (!mbox)
13635 		return -ENOMEM;
13636 	length = (sizeof(struct lpfc_mbx_wq_create) -
13637 		  sizeof(struct lpfc_sli4_cfg_mhdr));
13638 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13639 			 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
13640 			 length, LPFC_SLI4_MBX_EMBED);
13641 	wq_create = &mbox->u.mqe.un.wq_create;
13642 	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
13643 	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
13644 		    wq->page_count);
13645 	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
13646 		    cq->queue_id);
13647 
13648 	/* wqv is the earliest version supported, NOT the latest */
13649 	bf_set(lpfc_mbox_hdr_version, &shdr->request,
13650 	       phba->sli4_hba.pc_sli4_params.wqv);
13651 
13652 	switch (phba->sli4_hba.pc_sli4_params.wqv) {
13653 	case LPFC_Q_CREATE_VERSION_0:
13654 		switch (wq->entry_size) {
13655 		default:
13656 		case 64:
13657 			/* Nothing to do, version 0 ONLY supports 64 byte */
13658 			page = wq_create->u.request.page;
13659 			break;
13660 		case 128:
13661 			if (!(phba->sli4_hba.pc_sli4_params.wqsize &
13662 			    LPFC_WQ_SZ128_SUPPORT)) {
13663 				status = -ERANGE;
13664 				goto out;
13665 			}
13666 			/* If we get here the HBA MUST also support V1 and
13667 			 * we MUST use it
13668 			 */
13669 			bf_set(lpfc_mbox_hdr_version, &shdr->request,
13670 			       LPFC_Q_CREATE_VERSION_1);
13671 
13672 			bf_set(lpfc_mbx_wq_create_wqe_count,
13673 			       &wq_create->u.request_1, wq->entry_count);
13674 			bf_set(lpfc_mbx_wq_create_wqe_size,
13675 			       &wq_create->u.request_1,
13676 			       LPFC_WQ_WQE_SIZE_128);
13677 			bf_set(lpfc_mbx_wq_create_page_size,
13678 			       &wq_create->u.request_1,
13679 			       (PAGE_SIZE/SLI4_PAGE_SIZE));
13680 			page = wq_create->u.request_1.page;
13681 			break;
13682 		}
13683 		break;
13684 	case LPFC_Q_CREATE_VERSION_1:
13685 		bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
13686 		       wq->entry_count);
13687 		switch (wq->entry_size) {
13688 		default:
13689 		case 64:
13690 			bf_set(lpfc_mbx_wq_create_wqe_size,
13691 			       &wq_create->u.request_1,
13692 			       LPFC_WQ_WQE_SIZE_64);
13693 			break;
13694 		case 128:
13695 			if (!(phba->sli4_hba.pc_sli4_params.wqsize &
13696 				LPFC_WQ_SZ128_SUPPORT)) {
13697 				status = -ERANGE;
13698 				goto out;
13699 			}
13700 			bf_set(lpfc_mbx_wq_create_wqe_size,
13701 			       &wq_create->u.request_1,
13702 			       LPFC_WQ_WQE_SIZE_128);
13703 			break;
13704 		}
13705 		bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
13706 		       (PAGE_SIZE/SLI4_PAGE_SIZE));
13707 		page = wq_create->u.request_1.page;
13708 		break;
13709 	default:
13710 		status = -ERANGE;
13711 		goto out;
13712 	}
13713 
13714 	list_for_each_entry(dmabuf, &wq->page_list, list) {
13715 		memset(dmabuf->virt, 0, hw_page_size);
13716 		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
13717 		page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
13718 	}
13719 
13720 	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
13721 		bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
13722 
13723 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13724 	/* The IOCTL status is embedded in the mailbox subheader. */
13725 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13726 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13727 	if (shdr_status || shdr_add_status || rc) {
13728 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13729 				"2503 WQ_CREATE mailbox failed with "
13730 				"status x%x add_status x%x, mbx status x%x\n",
13731 				shdr_status, shdr_add_status, rc);
13732 		status = -ENXIO;
13733 		goto out;
13734 	}
13735 	wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
13736 	if (wq->queue_id == 0xFFFF) {
13737 		status = -ENXIO;
13738 		goto out;
13739 	}
13740 	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
13741 		wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
13742 				       &wq_create->u.response);
13743 		if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
13744 		    (wq->db_format != LPFC_DB_RING_FORMAT)) {
13745 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13746 					"3265 WQ[%d] doorbell format not "
13747 					"supported: x%x\n", wq->queue_id,
13748 					wq->db_format);
13749 			status = -EINVAL;
13750 			goto out;
13751 		}
13752 		pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
13753 				    &wq_create->u.response);
13754 		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
13755 		if (!bar_memmap_p) {
13756 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13757 					"3263 WQ[%d] failed to memmap pci "
13758 					"barset:x%x\n", wq->queue_id,
13759 					pci_barset);
13760 			status = -ENOMEM;
13761 			goto out;
13762 		}
13763 		db_offset = wq_create->u.response.doorbell_offset;
13764 		if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
13765 		    (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
13766 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13767 					"3252 WQ[%d] doorbell offset not "
13768 					"supported: x%x\n", wq->queue_id,
13769 					db_offset);
13770 			status = -EINVAL;
13771 			goto out;
13772 		}
13773 		wq->db_regaddr = bar_memmap_p + db_offset;
13774 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13775 				"3264 WQ[%d]: barset:x%x, offset:x%x, "
13776 				"format:x%x\n", wq->queue_id, pci_barset,
13777 				db_offset, wq->db_format);
13778 	} else {
13779 		wq->db_format = LPFC_DB_LIST_FORMAT;
13780 		wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
13781 	}
13782 	wq->type = LPFC_WQ;
13783 	wq->assoc_qid = cq->queue_id;
13784 	wq->subtype = subtype;
13785 	wq->host_index = 0;
13786 	wq->hba_index = 0;
13787 	wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
13788 
13789 	/* link the wq onto the parent cq child list */
13790 	list_add_tail(&wq->list, &cq->child_list);
13791 out:
13792 	mempool_free(mbox, phba->mbox_mem_pool);
13793 	return status;
13794 }
13795 
13796 /**
13797  * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ
13798  * @phba: HBA structure that indicates port to create a queue on.
13799  * @rq:   The queue structure to use for the receive queue.
13800  * @qno:  The associated HBQ number
13801  *
13802  *
13803  * For SLI4 we need to adjust the RQ repost value based on
13804  * the number of buffers that are initially posted to the RQ.
13805  */
13806 void
13807 lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
13808 {
13809 	uint32_t cnt;
13810 
13811 	/* sanity check on queue memory */
13812 	if (!rq)
13813 		return;
13814 	cnt = lpfc_hbq_defs[qno]->entry_count;
13815 
13816 	/* Recalc repost for RQs based on buffers initially posted */
13817 	cnt = (cnt >> 3);
13818 	if (cnt < LPFC_QUEUE_MIN_REPOST)
13819 		cnt = LPFC_QUEUE_MIN_REPOST;
13820 
13821 	rq->entry_repost = cnt;
13822 }
13823 
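/*
 * Worked example (illustrative only): with lpfc_hbq_defs[qno]->entry_count of
 * 512 initially posted buffers, the repost threshold becomes 512 >> 3 = 64,
 * i.e. consumed RQ entries are released back to the port in batches of 64.
 * Smaller initial postings are clamped up to LPFC_QUEUE_MIN_REPOST so the
 * batching interval never drops below that minimum.
 */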
13824 /**
13825  * lpfc_rq_create - Create a Receive Queue on the HBA
13826  * @phba: HBA structure that indicates port to create a queue on.
13827  * @hrq: The queue structure to use to create the header receive queue.
13828  * @drq: The queue structure to use to create the data receive queue.
13829  * @cq: The completion queue to bind the receive queues to.
13830  *
13831  * This function creates a receive buffer queue pair, as detailed in @hrq and
13832  * @drq, on a port, described by @phba by sending an RQ_CREATE mailbox command
13833  * to the HBA.
13834  *
13835  * The @phba struct is used to send the mailbox command to the HBA. The @drq
13836  * and @hrq structs are used to get the entry count that is necessary to
13837  * determine the number of pages to use for each queue. The @cq is used to
13838  * indicate which completion queue the buffers posted to these queues will be
13839  * bound to. This function will send the RQ_CREATE mailbox command to the HBA
13840  * to set up the receive queue pair. This function issues the mailbox command
13841  * synchronously and waits for it to complete before continuing.
13842  *
13843  * On success this function will return a zero. If unable to allocate enough
13844  * memory this function will return -ENOMEM. If the queue create mailbox command
13845  * fails this function will return -ENXIO.
13846  **/
13847 int
13848 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
13849 	       struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
13850 {
13851 	struct lpfc_mbx_rq_create *rq_create;
13852 	struct lpfc_dmabuf *dmabuf;
13853 	LPFC_MBOXQ_t *mbox;
13854 	int rc, length, status = 0;
13855 	uint32_t shdr_status, shdr_add_status;
13856 	union lpfc_sli4_cfg_shdr *shdr;
13857 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13858 	void __iomem *bar_memmap_p;
13859 	uint32_t db_offset;
13860 	uint16_t pci_barset;
13861 
13862 	/* sanity check on queue memory */
13863 	if (!hrq || !drq || !cq)
13864 		return -ENODEV;
13865 	if (!phba->sli4_hba.pc_sli4_params.supported)
13866 		hw_page_size = SLI4_PAGE_SIZE;
13867 
13868 	if (hrq->entry_count != drq->entry_count)
13869 		return -EINVAL;
13870 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13871 	if (!mbox)
13872 		return -ENOMEM;
13873 	length = (sizeof(struct lpfc_mbx_rq_create) -
13874 		  sizeof(struct lpfc_sli4_cfg_mhdr));
13875 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13876 			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
13877 			 length, LPFC_SLI4_MBX_EMBED);
13878 	rq_create = &mbox->u.mqe.un.rq_create;
13879 	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
13880 	bf_set(lpfc_mbox_hdr_version, &shdr->request,
13881 	       phba->sli4_hba.pc_sli4_params.rqv);
13882 	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
13883 		bf_set(lpfc_rq_context_rqe_count_1,
13884 		       &rq_create->u.request.context,
13885 		       hrq->entry_count);
13886 		rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
13887 		bf_set(lpfc_rq_context_rqe_size,
13888 		       &rq_create->u.request.context,
13889 		       LPFC_RQE_SIZE_8);
13890 		bf_set(lpfc_rq_context_page_size,
13891 		       &rq_create->u.request.context,
13892 		       (PAGE_SIZE/SLI4_PAGE_SIZE));
13893 	} else {
13894 		switch (hrq->entry_count) {
13895 		default:
13896 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13897 					"2535 Unsupported RQ count. (%d)\n",
13898 					hrq->entry_count);
13899 			if (hrq->entry_count < 512) {
13900 				status = -EINVAL;
13901 				goto out;
13902 			}
13903 			/* otherwise default to smallest count (drop through) */
13904 		case 512:
13905 			bf_set(lpfc_rq_context_rqe_count,
13906 			       &rq_create->u.request.context,
13907 			       LPFC_RQ_RING_SIZE_512);
13908 			break;
13909 		case 1024:
13910 			bf_set(lpfc_rq_context_rqe_count,
13911 			       &rq_create->u.request.context,
13912 			       LPFC_RQ_RING_SIZE_1024);
13913 			break;
13914 		case 2048:
13915 			bf_set(lpfc_rq_context_rqe_count,
13916 			       &rq_create->u.request.context,
13917 			       LPFC_RQ_RING_SIZE_2048);
13918 			break;
13919 		case 4096:
13920 			bf_set(lpfc_rq_context_rqe_count,
13921 			       &rq_create->u.request.context,
13922 			       LPFC_RQ_RING_SIZE_4096);
13923 			break;
13924 		}
13925 		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
13926 		       LPFC_HDR_BUF_SIZE);
13927 	}
13928 	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
13929 	       cq->queue_id);
13930 	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
13931 	       hrq->page_count);
13932 	list_for_each_entry(dmabuf, &hrq->page_list, list) {
13933 		memset(dmabuf->virt, 0, hw_page_size);
13934 		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
13935 					putPaddrLow(dmabuf->phys);
13936 		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
13937 					putPaddrHigh(dmabuf->phys);
13938 	}
13939 	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
13940 		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
13941 
13942 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13943 	/* The IOCTL status is embedded in the mailbox subheader. */
13944 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13945 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13946 	if (shdr_status || shdr_add_status || rc) {
13947 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13948 				"2504 RQ_CREATE mailbox failed with "
13949 				"status x%x add_status x%x, mbx status x%x\n",
13950 				shdr_status, shdr_add_status, rc);
13951 		status = -ENXIO;
13952 		goto out;
13953 	}
13954 	hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
13955 	if (hrq->queue_id == 0xFFFF) {
13956 		status = -ENXIO;
13957 		goto out;
13958 	}
13959 
13960 	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
13961 		hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
13962 					&rq_create->u.response);
13963 		if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
13964 		    (hrq->db_format != LPFC_DB_RING_FORMAT)) {
13965 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13966 					"3262 RQ [%d] doorbell format not "
13967 					"supported: x%x\n", hrq->queue_id,
13968 					hrq->db_format);
13969 			status = -EINVAL;
13970 			goto out;
13971 		}
13972 
13973 		pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
13974 				    &rq_create->u.response);
13975 		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
13976 		if (!bar_memmap_p) {
13977 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13978 					"3269 RQ[%d] failed to memmap pci "
13979 					"barset:x%x\n", hrq->queue_id,
13980 					pci_barset);
13981 			status = -ENOMEM;
13982 			goto out;
13983 		}
13984 
13985 		db_offset = rq_create->u.response.doorbell_offset;
13986 		if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
13987 		    (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
13988 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13989 					"3270 RQ[%d] doorbell offset not "
13990 					"supported: x%x\n", hrq->queue_id,
13991 					db_offset);
13992 			status = -EINVAL;
13993 			goto out;
13994 		}
13995 		hrq->db_regaddr = bar_memmap_p + db_offset;
13996 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13997 				"3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
13998 				"format:x%x\n", hrq->queue_id, pci_barset,
13999 				db_offset, hrq->db_format);
14000 	} else {
14001 		hrq->db_format = LPFC_DB_RING_FORMAT;
14002 		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
14003 	}
14004 	hrq->type = LPFC_HRQ;
14005 	hrq->assoc_qid = cq->queue_id;
14006 	hrq->subtype = subtype;
14007 	hrq->host_index = 0;
14008 	hrq->hba_index = 0;
14009 
14010 	/* now create the data queue */
14011 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14012 			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
14013 			 length, LPFC_SLI4_MBX_EMBED);
14014 	bf_set(lpfc_mbox_hdr_version, &shdr->request,
14015 	       phba->sli4_hba.pc_sli4_params.rqv);
14016 	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
14017 		bf_set(lpfc_rq_context_rqe_count_1,
14018 		       &rq_create->u.request.context, hrq->entry_count);
14019 		rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
14020 		bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
14021 		       LPFC_RQE_SIZE_8);
14022 		bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
14023 		       (PAGE_SIZE/SLI4_PAGE_SIZE));
14024 	} else {
14025 		switch (drq->entry_count) {
14026 		default:
14027 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14028 					"2536 Unsupported RQ count. (%d)\n",
14029 					drq->entry_count);
14030 			if (drq->entry_count < 512) {
14031 				status = -EINVAL;
14032 				goto out;
14033 			}
14034 			/* otherwise default to smallest count (drop through) */
14035 		case 512:
14036 			bf_set(lpfc_rq_context_rqe_count,
14037 			       &rq_create->u.request.context,
14038 			       LPFC_RQ_RING_SIZE_512);
14039 			break;
14040 		case 1024:
14041 			bf_set(lpfc_rq_context_rqe_count,
14042 			       &rq_create->u.request.context,
14043 			       LPFC_RQ_RING_SIZE_1024);
14044 			break;
14045 		case 2048:
14046 			bf_set(lpfc_rq_context_rqe_count,
14047 			       &rq_create->u.request.context,
14048 			       LPFC_RQ_RING_SIZE_2048);
14049 			break;
14050 		case 4096:
14051 			bf_set(lpfc_rq_context_rqe_count,
14052 			       &rq_create->u.request.context,
14053 			       LPFC_RQ_RING_SIZE_4096);
14054 			break;
14055 		}
14056 		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
14057 		       LPFC_DATA_BUF_SIZE);
14058 	}
14059 	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
14060 	       cq->queue_id);
14061 	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
14062 	       drq->page_count);
14063 	list_for_each_entry(dmabuf, &drq->page_list, list) {
14064 		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14065 					putPaddrLow(dmabuf->phys);
14066 		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14067 					putPaddrHigh(dmabuf->phys);
14068 	}
14069 	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
14070 		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
14071 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14072 	/* The IOCTL status is embedded in the mailbox subheader. */
14073 	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
14074 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14075 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14076 	if (shdr_status || shdr_add_status || rc) {
14077 		status = -ENXIO;
14078 		goto out;
14079 	}
14080 	drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
14081 	if (drq->queue_id == 0xFFFF) {
14082 		status = -ENXIO;
14083 		goto out;
14084 	}
14085 	drq->type = LPFC_DRQ;
14086 	drq->assoc_qid = cq->queue_id;
14087 	drq->subtype = subtype;
14088 	drq->host_index = 0;
14089 	drq->hba_index = 0;
14090 
14091 	/* link the header and data RQs onto the parent cq child list */
14092 	list_add_tail(&hrq->list, &cq->child_list);
14093 	list_add_tail(&drq->list, &cq->child_list);
14094 
14095 out:
14096 	mempool_free(mbox, phba->mbox_mem_pool);
14097 	return status;
14098 }
14099 
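/*
 * Illustrative sketch (not driver code): the header and data RQs are created
 * as a pair against one CQ and must be allocated with matching entry counts
 * (the routine above returns -EINVAL otherwise). A hedged outline; the
 * subtype and HBQ-number constants below are placeholder assumptions:
 *
 *	if (hrq->entry_count != drq->entry_count)
 *		return -EINVAL;
 *	rc = lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
 *	if (!rc) {
 *		lpfc_rq_adjust_repost(phba, hrq, LPFC_ELS_HBQ);
 *		lpfc_rq_adjust_repost(phba, drq, LPFC_ELS_HBQ);
 *	}
 */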
14100 /**
14101  * lpfc_eq_destroy - Destroy an event Queue on the HBA
14102  * @eq: The queue structure associated with the queue to destroy.
14103  *
14104  * This function destroys a queue, as detailed in @eq, by sending a mailbox
14105  * command, specific to the type of queue, to the HBA.
14106  *
14107  * The @eq struct is used to get the queue ID of the queue to destroy.
14108  *
14109  * On success this function will return a zero. If the queue destroy mailbox
14110  * command fails this function will return -ENXIO.
14111  **/
14112 int
14113 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
14114 {
14115 	LPFC_MBOXQ_t *mbox;
14116 	int rc, length, status = 0;
14117 	uint32_t shdr_status, shdr_add_status;
14118 	union lpfc_sli4_cfg_shdr *shdr;
14119 
14120 	/* sanity check on queue memory */
14121 	if (!eq)
14122 		return -ENODEV;
14123 	mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
14124 	if (!mbox)
14125 		return -ENOMEM;
14126 	length = (sizeof(struct lpfc_mbx_eq_destroy) -
14127 		  sizeof(struct lpfc_sli4_cfg_mhdr));
14128 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14129 			 LPFC_MBOX_OPCODE_EQ_DESTROY,
14130 			 length, LPFC_SLI4_MBX_EMBED);
14131 	bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
14132 	       eq->queue_id);
14133 	mbox->vport = eq->phba->pport;
14134 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14135 
14136 	rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
14137 	/* The IOCTL status is embedded in the mailbox subheader. */
14138 	shdr = (union lpfc_sli4_cfg_shdr *)
14139 		&mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
14140 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14141 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14142 	if (shdr_status || shdr_add_status || rc) {
14143 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14144 				"2505 EQ_DESTROY mailbox failed with "
14145 				"status x%x add_status x%x, mbx status x%x\n",
14146 				shdr_status, shdr_add_status, rc);
14147 		status = -ENXIO;
14148 	}
14149 
14150 	/* Remove eq from any list */
14151 	list_del_init(&eq->list);
14152 	mempool_free(mbox, eq->phba->mbox_mem_pool);
14153 	return status;
14154 }
14155 
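/*
 * Illustrative sketch (not part of the driver): queue teardown ordering.
 * The destroy helpers in this file each send a single embedded mailbox
 * command in MBX_POLL mode, so a caller typically tears down child queues
 * before their parents, e.g. the work/receive queues, then the completion
 * queue, then the event queue. Assuming hypothetical wq, cq and eq pointers
 * of type struct lpfc_queue * that were previously created and associated:
 *
 *	lpfc_wq_destroy(phba, wq);
 *	lpfc_cq_destroy(phba, cq);
 *	lpfc_eq_destroy(phba, eq);
 *
 * Each call returns 0 on success or -ENXIO if its destroy mailbox fails.
 */
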
14156 /**
14157  * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
14158  * @cq: The queue structure associated with the queue to destroy.
14159  *
14160  * This function destroys a queue, as detailed in @cq, by sending a mailbox
14161  * command, specific to the type of queue, to the HBA.
14162  *
14163  * The @cq struct is used to get the queue ID of the queue to destroy.
14164  *
14165  * On success this function will return a zero. If the queue destroy mailbox
14166  * command fails this function will return -ENXIO.
14167  **/
14168 int
14169 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
14170 {
14171 	LPFC_MBOXQ_t *mbox;
14172 	int rc, length, status = 0;
14173 	uint32_t shdr_status, shdr_add_status;
14174 	union lpfc_sli4_cfg_shdr *shdr;
14175 
14176 	/* sanity check on queue memory */
14177 	if (!cq)
14178 		return -ENODEV;
14179 	mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
14180 	if (!mbox)
14181 		return -ENOMEM;
14182 	length = (sizeof(struct lpfc_mbx_cq_destroy) -
14183 		  sizeof(struct lpfc_sli4_cfg_mhdr));
14184 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14185 			 LPFC_MBOX_OPCODE_CQ_DESTROY,
14186 			 length, LPFC_SLI4_MBX_EMBED);
14187 	bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
14188 	       cq->queue_id);
14189 	mbox->vport = cq->phba->pport;
14190 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14191 	rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
14192 	/* The IOCTL status is embedded in the mailbox subheader. */
14193 	shdr = (union lpfc_sli4_cfg_shdr *)
14194 		&mbox->u.mqe.un.wq_create.header.cfg_shdr;
14195 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14196 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14197 	if (shdr_status || shdr_add_status || rc) {
14198 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14199 				"2506 CQ_DESTROY mailbox failed with "
14200 				"status x%x add_status x%x, mbx status x%x\n",
14201 				shdr_status, shdr_add_status, rc);
14202 		status = -ENXIO;
14203 	}
14204 	/* Remove cq from any list */
14205 	list_del_init(&cq->list);
14206 	mempool_free(mbox, cq->phba->mbox_mem_pool);
14207 	return status;
14208 }
14209 
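/*
 * Illustrative sketch (not part of the driver): the mailbox completion check
 * repeated in the destroy routines above and below. The SLI4 config header
 * carries the real command status, so both the mailbox return code and the
 * embedded status/add_status words have to be inspected. A hypothetical
 * helper collapsing that pattern might look like:
 *
 *	static int lpfc_example_check_shdr(union lpfc_sli4_cfg_shdr *shdr, int rc)
 *	{
 *		uint32_t status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
 *		uint32_t add = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
 *
 *		return (status || add || rc) ? -ENXIO : 0;
 *	}
 *
 * The driver itself open-codes this check in each routine.
 */
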
14210 /**
14211  * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
14212  * @mq: The queue structure associated with the queue to destroy.
14213  *
14214  * This function destroys a queue, as detailed in @mq, by sending a mailbox
14215  * command, specific to the type of queue, to the HBA.
14216  *
14217  * The @mq struct is used to get the queue ID of the queue to destroy.
14218  *
14219  * On success this function will return a zero. If the queue destroy mailbox
14220  * command fails this function will return -ENXIO.
14221  **/
14222 int
14223 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
14224 {
14225 	LPFC_MBOXQ_t *mbox;
14226 	int rc, length, status = 0;
14227 	uint32_t shdr_status, shdr_add_status;
14228 	union lpfc_sli4_cfg_shdr *shdr;
14229 
14230 	/* sanity check on queue memory */
14231 	if (!mq)
14232 		return -ENODEV;
14233 	mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
14234 	if (!mbox)
14235 		return -ENOMEM;
14236 	length = (sizeof(struct lpfc_mbx_mq_destroy) -
14237 		  sizeof(struct lpfc_sli4_cfg_mhdr));
14238 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14239 			 LPFC_MBOX_OPCODE_MQ_DESTROY,
14240 			 length, LPFC_SLI4_MBX_EMBED);
14241 	bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
14242 	       mq->queue_id);
14243 	mbox->vport = mq->phba->pport;
14244 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14245 	rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
14246 	/* The IOCTL status is embedded in the mailbox subheader. */
14247 	shdr = (union lpfc_sli4_cfg_shdr *)
14248 		&mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
14249 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14250 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14251 	if (shdr_status || shdr_add_status || rc) {
14252 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14253 				"2507 MQ_DESTROY mailbox failed with "
14254 				"status x%x add_status x%x, mbx status x%x\n",
14255 				shdr_status, shdr_add_status, rc);
14256 		status = -ENXIO;
14257 	}
14258 	/* Remove mq from any list */
14259 	list_del_init(&mq->list);
14260 	mempool_free(mbox, mq->phba->mbox_mem_pool);
14261 	return status;
14262 }
14263 
14264 /**
14265  * lpfc_wq_destroy - Destroy a Work Queue on the HBA
14266  * @wq: The queue structure associated with the queue to destroy.
14267  *
14268  * This function destroys a queue, as detailed in @wq, by sending a mailbox
14269  * command, specific to the type of queue, to the HBA.
14270  *
14271  * The @wq struct is used to get the queue ID of the queue to destroy.
14272  *
14273  * On success this function will return a zero. If the queue destroy mailbox
14274  * command fails this function will return -ENXIO.
14275  **/
14276 int
14277 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
14278 {
14279 	LPFC_MBOXQ_t *mbox;
14280 	int rc, length, status = 0;
14281 	uint32_t shdr_status, shdr_add_status;
14282 	union lpfc_sli4_cfg_shdr *shdr;
14283 
14284 	/* sanity check on queue memory */
14285 	if (!wq)
14286 		return -ENODEV;
14287 	mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
14288 	if (!mbox)
14289 		return -ENOMEM;
14290 	length = (sizeof(struct lpfc_mbx_wq_destroy) -
14291 		  sizeof(struct lpfc_sli4_cfg_mhdr));
14292 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14293 			 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
14294 			 length, LPFC_SLI4_MBX_EMBED);
14295 	bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
14296 	       wq->queue_id);
14297 	mbox->vport = wq->phba->pport;
14298 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14299 	rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
14300 	shdr = (union lpfc_sli4_cfg_shdr *)
14301 		&mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
14302 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14303 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14304 	if (shdr_status || shdr_add_status || rc) {
14305 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14306 				"2508 WQ_DESTROY mailbox failed with "
14307 				"status x%x add_status x%x, mbx status x%x\n",
14308 				shdr_status, shdr_add_status, rc);
14309 		status = -ENXIO;
14310 	}
14311 	/* Remove wq from any list */
14312 	list_del_init(&wq->list);
14313 	mempool_free(mbox, wq->phba->mbox_mem_pool);
14314 	return status;
14315 }
14316 
14317 /**
14318  * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
14319  * @hrq: The queue structure associated with the header receive queue.
14320  * @drq: The queue structure associated with the data receive queue.
14321  *
14322  * This function destroys the header and data receive queues, as detailed
14323  * in @hrq and @drq, by sending mailbox commands to the HBA.
14324  * The @hrq and @drq structs are used to get the queue IDs to destroy.
14325  *
14326  * On success this function will return a zero. If the queue destroy mailbox
14327  * command fails this function will return -ENXIO.
14328  **/
14329 int
14330 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
14331 		struct lpfc_queue *drq)
14332 {
14333 	LPFC_MBOXQ_t *mbox;
14334 	int rc, length, status = 0;
14335 	uint32_t shdr_status, shdr_add_status;
14336 	union lpfc_sli4_cfg_shdr *shdr;
14337 
14338 	/* sanity check on queue memory */
14339 	if (!hrq || !drq)
14340 		return -ENODEV;
14341 	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
14342 	if (!mbox)
14343 		return -ENOMEM;
14344 	length = (sizeof(struct lpfc_mbx_rq_destroy) -
14345 		  sizeof(struct lpfc_sli4_cfg_mhdr));
14346 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14347 			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
14348 			 length, LPFC_SLI4_MBX_EMBED);
14349 	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
14350 	       hrq->queue_id);
14351 	mbox->vport = hrq->phba->pport;
14352 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14353 	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
14354 	/* The IOCTL status is embedded in the mailbox subheader. */
14355 	shdr = (union lpfc_sli4_cfg_shdr *)
14356 		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
14357 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14358 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14359 	if (shdr_status || shdr_add_status || rc) {
14360 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14361 				"2509 RQ_DESTROY mailbox failed with "
14362 				"status x%x add_status x%x, mbx status x%x\n",
14363 				shdr_status, shdr_add_status, rc);
14364 		if (rc != MBX_TIMEOUT)
14365 			mempool_free(mbox, hrq->phba->mbox_mem_pool);
14366 		return -ENXIO;
14367 	}
14368 	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
14369 	       drq->queue_id);
14370 	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
14371 	shdr = (union lpfc_sli4_cfg_shdr *)
14372 		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
14373 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14374 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14375 	if (shdr_status || shdr_add_status || rc) {
14376 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14377 				"2510 RQ_DESTROY mailbox failed with "
14378 				"status x%x add_status x%x, mbx status x%x\n",
14379 				shdr_status, shdr_add_status, rc);
14380 		status = -ENXIO;
14381 	}
14382 	list_del_init(&hrq->list);
14383 	list_del_init(&drq->list);
14384 	mempool_free(mbox, hrq->phba->mbox_mem_pool);
14385 	return status;
14386 }
14387 
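/*
 * Illustrative sketch (not part of the driver): lpfc_rq_destroy() above tears
 * down a header/data receive queue pair with one mailbox that is reissued for
 * the second queue ID. Assuming hypothetical hrq and drq pointers obtained
 * from a matching pair of lpfc_rq_create() calls:
 *
 *	rc = lpfc_rq_destroy(phba, hrq, drq);
 *	if (rc)
 *		return rc;	(-ENODEV, -ENOMEM or -ENXIO)
 *
 * Passing NULL for either queue returns -ENODEV before any mailbox is built.
 */
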
14388 /**
14389  * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
14390  * @phba: pointer to lpfc hba data structure.
14391  * @pdma_phys_addr0: Physical address of the 1st SGL page.
14392  * @pdma_phys_addr1: Physical address of the 2nd SGL page.
14393  * @xritag: the xritag that ties this io to the SGL pages.
14394  *
14395  * This routine will post the sgl pages for the IO that has the xritag
14396  * that is in the iocbq structure. The xritag is assigned during iocbq
14397  * creation and persists for as long as the driver is loaded.
14398  * If the caller has fewer than 256 scatter gather segments to map then
14399  * pdma_phys_addr1 should be 0.
14400  * If the caller needs to map more than 256 scatter gather segments then
14401  * pdma_phys_addr1 should be a valid physical address.
14402  * The physical addresses for the SGL pages must be 64 byte aligned.
14403  * If two SGL pages are mapped, the first must contain 256 entries and
14404  * the second can contain between 1 and 256 entries.
14405  *
14406  * Return codes:
14407  * 	0 - Success
14408  * 	-ENXIO, -ENOMEM - Failure
14409  **/
14410 int
14411 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
14412 		dma_addr_t pdma_phys_addr0,
14413 		dma_addr_t pdma_phys_addr1,
14414 		uint16_t xritag)
14415 {
14416 	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
14417 	LPFC_MBOXQ_t *mbox;
14418 	int rc;
14419 	uint32_t shdr_status, shdr_add_status;
14420 	uint32_t mbox_tmo;
14421 	union lpfc_sli4_cfg_shdr *shdr;
14422 
14423 	if (xritag == NO_XRI) {
14424 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14425 				"0364 Invalid param:\n");
14426 		return -EINVAL;
14427 	}
14428 
14429 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14430 	if (!mbox)
14431 		return -ENOMEM;
14432 
14433 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14434 			LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
14435 			sizeof(struct lpfc_mbx_post_sgl_pages) -
14436 			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
14437 
14438 	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
14439 				&mbox->u.mqe.un.post_sgl_pages;
14440 	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
14441 	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
14442 
14443 	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo	=
14444 				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
14445 	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
14446 				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
14447 
14448 	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo	=
14449 				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
14450 	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
14451 				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
14452 	if (!phba->sli4_hba.intr_enable)
14453 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14454 	else {
14455 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
14456 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
14457 	}
14458 	/* The IOCTL status is embedded in the mailbox subheader. */
14459 	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
14460 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14461 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14462 	if (rc != MBX_TIMEOUT)
14463 		mempool_free(mbox, phba->mbox_mem_pool);
14464 	if (shdr_status || shdr_add_status || rc) {
14465 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14466 				"2511 POST_SGL mailbox failed with "
14467 				"status x%x add_status x%x, mbx status x%x\n",
14468 				shdr_status, shdr_add_status, rc);
14469 	}
14470 	return 0;
14471 }
14472 
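/*
 * Illustrative sketch (not part of the driver): posting a single SGL page for
 * an xri with lpfc_sli4_post_sgl() above. Per the routine's comment, when
 * fewer than 256 scatter gather entries are needed only the first page is
 * supplied and the second physical address is 0. Assuming a hypothetical sglq
 * entry with a DMA-mapped page at sglq->phys and a valid sglq->sli4_xritag:
 *
 *	rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
 *
 * The non-embedded variant lpfc_sli4_post_els_sgl_list() below posts whole
 * blocks of SGL pages in a single mailbox instead.
 */
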
14473 /**
14474  * lpfc_sli4_alloc_xri - Get an available xri in the device's range
14475  * @phba: pointer to lpfc hba data structure.
14476  *
14477  * This routine is invoked to allocate the next available xri from the
14478  * driver's xri bitmask, consistent with the SLI-4 interface spec. The
14479  * logical xri index is marked in use and the count of used xris is
14480  * incremented under the hba lock.
14481  *
14482  * Returns
14483  *	An available logical xri index (0 <= xri < max_xri) if successful
14484  *	NO_XRI if no xris are available.
14485  **/
14486 static uint16_t
14487 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
14488 {
14489 	unsigned long xri;
14490 
14491 	/*
14492 	 * Fetch the next logical xri.  Because this index is logical,
14493 	 * the driver starts at 0 each time.
14494 	 */
14495 	spin_lock_irq(&phba->hbalock);
14496 	xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
14497 				 phba->sli4_hba.max_cfg_param.max_xri, 0);
14498 	if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
14499 		spin_unlock_irq(&phba->hbalock);
14500 		return NO_XRI;
14501 	} else {
14502 		set_bit(xri, phba->sli4_hba.xri_bmask);
14503 		phba->sli4_hba.max_cfg_param.xri_used++;
14504 	}
14505 	spin_unlock_irq(&phba->hbalock);
14506 	return xri;
14507 }
14508 
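/*
 * Illustrative sketch (not part of the driver): lpfc_sli4_alloc_xri() above
 * and the free routines below implement a simple bitmask allocator. Under
 * hbalock the next clear bit is claimed and the usage counter bumped; freeing
 * clears the bit again. Stripped of the driver context, the allocation step
 * looks like:
 *
 *	id = find_next_zero_bit(bmask, max, 0);
 *	if (id >= max)
 *		return NO_XRI;		(nothing left to hand out)
 *	set_bit(id, bmask);
 *	used++;
 *
 * where bmask, max and used stand in for xri_bmask, max_xri and xri_used.
 */
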
14509 /**
14510  * __lpfc_sli4_free_xri - Release an xri for reuse.
14511  * @phba: pointer to lpfc hba data structure.
14512  *
14513  * This routine is invoked to release an xri to the pool of
14514  * available xris maintained by the driver. The hbalock must be held.
14515  **/
14516 static void
14517 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
14518 {
14519 	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
14520 		phba->sli4_hba.max_cfg_param.xri_used--;
14521 	}
14522 }
14523 
14524 /**
14525  * lpfc_sli4_free_xri - Release an xri for reuse.
14526  * @phba: pointer to lpfc hba data structure.
14527  *
14528  * This routine is invoked to release an xri to the pool of
14529  * available xris maintained by the driver.
14530  **/
14531 void
14532 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
14533 {
14534 	spin_lock_irq(&phba->hbalock);
14535 	__lpfc_sli4_free_xri(phba, xri);
14536 	spin_unlock_irq(&phba->hbalock);
14537 }
14538 
14539 /**
14540  * lpfc_sli4_next_xritag - Get an xritag for the io
14541  * @phba: Pointer to HBA context object.
14542  *
14543  * This function gets an xritag for the iocb. If there is no unused xritag
14544  * it will return NO_XRI (0xffff).
14545  * The function returns the allocated xritag if successful, else returns
14546  * NO_XRI. NO_XRI is not a valid xritag.
14547  * The caller is not required to hold any lock.
14548  **/
14549 uint16_t
14550 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
14551 {
14552 	uint16_t xri_index;
14553 
14554 	xri_index = lpfc_sli4_alloc_xri(phba);
14555 	if (xri_index == NO_XRI)
14556 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14557 				"2004 Failed to allocate XRI. Last XRITAG is %d"
14558 				" Max XRI is %d, Used XRI is %d\n",
14559 				xri_index,
14560 				phba->sli4_hba.max_cfg_param.max_xri,
14561 				phba->sli4_hba.max_cfg_param.xri_used);
14562 	return xri_index;
14563 }
14564 
14565 /**
14566  * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
14567  * @phba: pointer to lpfc hba data structure.
14568  * @post_sgl_list: pointer to els sgl entry list.
14569  * @post_cnt: number of els sgl entries on the list.
14570  *
14571  * This routine is invoked to post a block of driver's sgl pages to the
14572  * HBA using non-embedded mailbox command. No Lock is held. This routine
14573  * is only called when the driver is loading and after all IO has been
14574  * stopped.
14575  **/
14576 static int
14577 lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba,
14578 			    struct list_head *post_sgl_list,
14579 			    int post_cnt)
14580 {
14581 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
14582 	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
14583 	struct sgl_page_pairs *sgl_pg_pairs;
14584 	void *viraddr;
14585 	LPFC_MBOXQ_t *mbox;
14586 	uint32_t reqlen, alloclen, pg_pairs;
14587 	uint32_t mbox_tmo;
14588 	uint16_t xritag_start = 0;
14589 	int rc = 0;
14590 	uint32_t shdr_status, shdr_add_status;
14591 	union lpfc_sli4_cfg_shdr *shdr;
14592 
14593 	reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) +
14594 		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
14595 	if (reqlen > SLI4_PAGE_SIZE) {
14596 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
14597 				"2559 Block sgl registration required DMA "
14598 				"size (%d) greater than a page\n", reqlen);
14599 		return -ENOMEM;
14600 	}
14601 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14602 	if (!mbox)
14603 		return -ENOMEM;
14604 
14605 	/* Allocate DMA memory and set up the non-embedded mailbox command */
14606 	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14607 			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
14608 			 LPFC_SLI4_MBX_NEMBED);
14609 
14610 	if (alloclen < reqlen) {
14611 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14612 				"0285 Allocated DMA memory size (%d) is "
14613 				"less than the requested DMA memory "
14614 				"size (%d)\n", alloclen, reqlen);
14615 		lpfc_sli4_mbox_cmd_free(phba, mbox);
14616 		return -ENOMEM;
14617 	}
14618 	/* Set up the SGL pages in the non-embedded DMA pages */
14619 	viraddr = mbox->sge_array->addr[0];
14620 	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
14621 	sgl_pg_pairs = &sgl->sgl_pg_pairs;
14622 
14623 	pg_pairs = 0;
14624 	list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
14625 		/* Set up the sge entry */
14626 		sgl_pg_pairs->sgl_pg0_addr_lo =
14627 				cpu_to_le32(putPaddrLow(sglq_entry->phys));
14628 		sgl_pg_pairs->sgl_pg0_addr_hi =
14629 				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
14630 		sgl_pg_pairs->sgl_pg1_addr_lo =
14631 				cpu_to_le32(putPaddrLow(0));
14632 		sgl_pg_pairs->sgl_pg1_addr_hi =
14633 				cpu_to_le32(putPaddrHigh(0));
14634 
14635 		/* Keep the first xritag on the list */
14636 		if (pg_pairs == 0)
14637 			xritag_start = sglq_entry->sli4_xritag;
14638 		sgl_pg_pairs++;
14639 		pg_pairs++;
14640 	}
14641 
14642 	/* Complete initialization and perform endian conversion. */
14643 	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
14644 	bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt);
14645 	sgl->word0 = cpu_to_le32(sgl->word0);
14646 	if (!phba->sli4_hba.intr_enable)
14647 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14648 	else {
14649 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
14650 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
14651 	}
14652 	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
14653 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14654 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14655 	if (rc != MBX_TIMEOUT)
14656 		lpfc_sli4_mbox_cmd_free(phba, mbox);
14657 	if (shdr_status || shdr_add_status || rc) {
14658 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14659 				"2513 POST_SGL_BLOCK mailbox command failed "
14660 				"status x%x add_status x%x mbx status x%x\n",
14661 				shdr_status, shdr_add_status, rc);
14662 		rc = -ENXIO;
14663 	}
14664 	return rc;
14665 }
14666 
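/*
 * Illustrative sketch (not part of the driver): sizing the non-embedded
 * POST_SGL_PAGES request used above. Each ELS sgl contributes one
 * sgl_page_pairs entry, plus the config header and one trailing word, and
 * the whole request must fit in a single SLI4 page:
 *
 *	reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
 *		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
 *	if (reqlen > SLI4_PAGE_SIZE)
 *		return -ENOMEM;		(caller must post in smaller blocks)
 *
 * lpfc_sli4_post_scsi_sgl_block() below performs the same calculation with a
 * caller-supplied buffer count.
 */
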
14667 /**
14668  * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
14669  * @phba: pointer to lpfc hba data structure.
14670  * @sblist: pointer to scsi buffer list.
14671  * @count: number of scsi buffers on the list.
14672  *
14673  * This routine is invoked to post a block of @count scsi sgl pages from a
14674  * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
14675  * No Lock is held.
14676  *
14677  **/
14678 int
14679 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
14680 			      struct list_head *sblist,
14681 			      int count)
14682 {
14683 	struct lpfc_scsi_buf *psb;
14684 	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
14685 	struct sgl_page_pairs *sgl_pg_pairs;
14686 	void *viraddr;
14687 	LPFC_MBOXQ_t *mbox;
14688 	uint32_t reqlen, alloclen, pg_pairs;
14689 	uint32_t mbox_tmo;
14690 	uint16_t xritag_start = 0;
14691 	int rc = 0;
14692 	uint32_t shdr_status, shdr_add_status;
14693 	dma_addr_t pdma_phys_bpl1;
14694 	union lpfc_sli4_cfg_shdr *shdr;
14695 
14696 	/* Calculate the requested length of the dma memory */
14697 	reqlen = count * sizeof(struct sgl_page_pairs) +
14698 		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
14699 	if (reqlen > SLI4_PAGE_SIZE) {
14700 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
14701 				"0217 Block sgl registration required DMA "
14702 				"size (%d) greater than a page\n", reqlen);
14703 		return -ENOMEM;
14704 	}
14705 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14706 	if (!mbox) {
14707 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14708 				"0283 Failed to allocate mbox cmd memory\n");
14709 		return -ENOMEM;
14710 	}
14711 
14712 	/* Allocate DMA memory and set up the non-embedded mailbox command */
14713 	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14714 				LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
14715 				LPFC_SLI4_MBX_NEMBED);
14716 
14717 	if (alloclen < reqlen) {
14718 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14719 				"2561 Allocated DMA memory size (%d) is "
14720 				"less than the requested DMA memory "
14721 				"size (%d)\n", alloclen, reqlen);
14722 		lpfc_sli4_mbox_cmd_free(phba, mbox);
14723 		return -ENOMEM;
14724 	}
14725 
14726 	/* Get the first SGE entry from the non-embedded DMA memory */
14727 	viraddr = mbox->sge_array->addr[0];
14728 
14729 	/* Set up the SGL pages in the non-embedded DMA pages */
14730 	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
14731 	sgl_pg_pairs = &sgl->sgl_pg_pairs;
14732 
14733 	pg_pairs = 0;
14734 	list_for_each_entry(psb, sblist, list) {
14735 		/* Set up the sge entry */
14736 		sgl_pg_pairs->sgl_pg0_addr_lo =
14737 			cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
14738 		sgl_pg_pairs->sgl_pg0_addr_hi =
14739 			cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
14740 		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
14741 			pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
14742 		else
14743 			pdma_phys_bpl1 = 0;
14744 		sgl_pg_pairs->sgl_pg1_addr_lo =
14745 			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
14746 		sgl_pg_pairs->sgl_pg1_addr_hi =
14747 			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
14748 		/* Keep the first xritag on the list */
14749 		if (pg_pairs == 0)
14750 			xritag_start = psb->cur_iocbq.sli4_xritag;
14751 		sgl_pg_pairs++;
14752 		pg_pairs++;
14753 	}
14754 	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
14755 	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
14756 	/* Perform endian conversion if necessary */
14757 	sgl->word0 = cpu_to_le32(sgl->word0);
14758 
14759 	if (!phba->sli4_hba.intr_enable)
14760 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14761 	else {
14762 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
14763 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
14764 	}
14765 	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
14766 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14767 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14768 	if (rc != MBX_TIMEOUT)
14769 		lpfc_sli4_mbox_cmd_free(phba, mbox);
14770 	if (shdr_status || shdr_add_status || rc) {
14771 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14772 				"2564 POST_SGL_BLOCK mailbox command failed "
14773 				"status x%x add_status x%x mbx status x%x\n",
14774 				shdr_status, shdr_add_status, rc);
14775 		rc = -ENXIO;
14776 	}
14777 	return rc;
14778 }
14779 
14780 /**
14781  * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
14782  * @phba: pointer to lpfc_hba struct that the frame was received on
14783  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
14784  *
14785  * This function checks the fields in the @fc_hdr to see if the FC frame is a
14786  * valid type of frame that the LPFC driver will handle. This function will
14787  * return a zero if the frame is a valid frame or a non zero value when the
14788  * frame does not pass the check.
14789  **/
14790 static int
14791 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
14792 {
14793 	/*  make rctl_names static to save stack space */
14794 	static char *rctl_names[] = FC_RCTL_NAMES_INIT;
14795 	char *type_names[] = FC_TYPE_NAMES_INIT;
14796 	struct fc_vft_header *fc_vft_hdr;
14797 	uint32_t *header = (uint32_t *) fc_hdr;
14798 
14799 	switch (fc_hdr->fh_r_ctl) {
14800 	case FC_RCTL_DD_UNCAT:		/* uncategorized information */
14801 	case FC_RCTL_DD_SOL_DATA:	/* solicited data */
14802 	case FC_RCTL_DD_UNSOL_CTL:	/* unsolicited control */
14803 	case FC_RCTL_DD_SOL_CTL:	/* solicited control or reply */
14804 	case FC_RCTL_DD_UNSOL_DATA:	/* unsolicited data */
14805 	case FC_RCTL_DD_DATA_DESC:	/* data descriptor */
14806 	case FC_RCTL_DD_UNSOL_CMD:	/* unsolicited command */
14807 	case FC_RCTL_DD_CMD_STATUS:	/* command status */
14808 	case FC_RCTL_ELS_REQ:	/* extended link services request */
14809 	case FC_RCTL_ELS_REP:	/* extended link services reply */
14810 	case FC_RCTL_ELS4_REQ:	/* FC-4 ELS request */
14811 	case FC_RCTL_ELS4_REP:	/* FC-4 ELS reply */
14812 	case FC_RCTL_BA_NOP:  	/* basic link service NOP */
14813 	case FC_RCTL_BA_ABTS: 	/* basic link service abort */
14814 	case FC_RCTL_BA_RMC: 	/* remove connection */
14815 	case FC_RCTL_BA_ACC:	/* basic accept */
14816 	case FC_RCTL_BA_RJT:	/* basic reject */
14817 	case FC_RCTL_BA_PRMT:
14818 	case FC_RCTL_ACK_1:	/* acknowledge_1 */
14819 	case FC_RCTL_ACK_0:	/* acknowledge_0 */
14820 	case FC_RCTL_P_RJT:	/* port reject */
14821 	case FC_RCTL_F_RJT:	/* fabric reject */
14822 	case FC_RCTL_P_BSY:	/* port busy */
14823 	case FC_RCTL_F_BSY:	/* fabric busy to data frame */
14824 	case FC_RCTL_F_BSYL:	/* fabric busy to link control frame */
14825 	case FC_RCTL_LCR:	/* link credit reset */
14826 	case FC_RCTL_END:	/* end */
14827 		break;
14828 	case FC_RCTL_VFTH:	/* Virtual Fabric tagging Header */
14829 		fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
14830 		fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
14831 		return lpfc_fc_frame_check(phba, fc_hdr);
14832 	default:
14833 		goto drop;
14834 	}
14835 	switch (fc_hdr->fh_type) {
14836 	case FC_TYPE_BLS:
14837 	case FC_TYPE_ELS:
14838 	case FC_TYPE_FCP:
14839 	case FC_TYPE_CT:
14840 		break;
14841 	case FC_TYPE_IP:
14842 	case FC_TYPE_ILS:
14843 	default:
14844 		goto drop;
14845 	}
14846 
14847 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
14848 			"2538 Received frame rctl:%s (x%x), type:%s (x%x), "
14849 			"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
14850 			rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
14851 			type_names[fc_hdr->fh_type], fc_hdr->fh_type,
14852 			be32_to_cpu(header[0]), be32_to_cpu(header[1]),
14853 			be32_to_cpu(header[2]), be32_to_cpu(header[3]),
14854 			be32_to_cpu(header[4]), be32_to_cpu(header[5]),
14855 			be32_to_cpu(header[6]));
14856 	return 0;
14857 drop:
14858 	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
14859 			"2539 Dropped frame rctl:%s type:%s\n",
14860 			rctl_names[fc_hdr->fh_r_ctl],
14861 			type_names[fc_hdr->fh_type]);
14862 	return 1;
14863 }
14864 
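/*
 * Illustrative sketch (not part of the driver): how the VFT (Virtual Fabric
 * Tagging) case in lpfc_fc_frame_check() above steps past the extra header.
 * A VFT-tagged frame carries a struct fc_vft_header in front of the real FC
 * header, so the check advances one fc_frame_header-sized slot and then
 * re-validates the inner header:
 *
 *	fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
 *	fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
 *	return lpfc_fc_frame_check(phba, fc_hdr);
 *
 * The VFI carried in that tagging header is read separately by
 * lpfc_fc_hdr_get_vfi() below.
 */
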
14865 /**
14866  * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
14867  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
14868  *
14869  * This function processes the FC header to retrieve the VFI from the VF
14870  * header, if one exists. This function will return the VFI if one exists
14871  * or 0 if no VSAN Header exists.
14872  **/
14873 static uint32_t
14874 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
14875 {
14876 	struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
14877 
14878 	if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
14879 		return 0;
14880 	return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
14881 }
14882 
14883 /**
14884  * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
14885  * @phba: Pointer to the HBA structure to search for the vport on
14886  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
14887  * @fcfi: The FC Fabric ID that the frame came from
14888  *
14889  * This function searches the @phba for a vport that matches the content of the
14890  * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
14891  * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
14892  * returns the matching vport pointer or NULL if unable to match frame to a
14893  * vport.
14894  **/
14895 static struct lpfc_vport *
14896 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
14897 		       uint16_t fcfi)
14898 {
14899 	struct lpfc_vport **vports;
14900 	struct lpfc_vport *vport = NULL;
14901 	int i;
14902 	uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
14903 			fc_hdr->fh_d_id[1] << 8 |
14904 			fc_hdr->fh_d_id[2]);
14905 
14906 	if (did == Fabric_DID)
14907 		return phba->pport;
14908 	if ((phba->pport->fc_flag & FC_PT2PT) &&
14909 		!(phba->link_state == LPFC_HBA_READY))
14910 		return phba->pport;
14911 
14912 	vports = lpfc_create_vport_work_array(phba);
14913 	if (vports != NULL)
14914 		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
14915 			if (phba->fcf.fcfi == fcfi &&
14916 			    vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
14917 			    vports[i]->fc_myDID == did) {
14918 				vport = vports[i];
14919 				break;
14920 			}
14921 		}
14922 	lpfc_destroy_vport_work_array(phba, vports);
14923 	return vport;
14924 }
14925 
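/*
 * Illustrative sketch (not part of the driver): the 24-bit FC destination ID
 * used by lpfc_fc_frame_to_vport() above is carried as three bytes in the
 * frame header and assembled most-significant byte first:
 *
 *	did = (fc_hdr->fh_d_id[0] << 16) |
 *	      (fc_hdr->fh_d_id[1] << 8)  |
 *	       fc_hdr->fh_d_id[2];
 *
 * The source ID is built the same way from fh_s_id (see the
 * sli4_sid_from_fc_hdr() uses later in this file).
 */
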
14926 /**
14927  * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
14928  * @vport: The vport to work on.
14929  *
14930  * This function updates the receive sequence time stamp for this vport. The
14931  * receive sequence time stamp indicates the time that the last frame of
14932  * the sequence that has been idle for the longest amount of time was received.
14933  * The driver uses this time stamp to determine if any received sequences have
14934  * timed out.
14935  **/
14936 static void
14937 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
14938 {
14939 	struct lpfc_dmabuf *h_buf;
14940 	struct hbq_dmabuf *dmabuf = NULL;
14941 
14942 	/* get the oldest sequence on the rcv list */
14943 	h_buf = list_get_first(&vport->rcv_buffer_list,
14944 			       struct lpfc_dmabuf, list);
14945 	if (!h_buf)
14946 		return;
14947 	dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14948 	vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
14949 }
14950 
14951 /**
14952  * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
14953  * @vport: The vport that the received sequences were sent to.
14954  *
14955  * This function cleans up all outstanding received sequences. This is called
14956  * by the driver when a link event or user action invalidates all the received
14957  * sequences.
14958  **/
14959 void
14960 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
14961 {
14962 	struct lpfc_dmabuf *h_buf, *hnext;
14963 	struct lpfc_dmabuf *d_buf, *dnext;
14964 	struct hbq_dmabuf *dmabuf = NULL;
14965 
14966 	/* start with the oldest sequence on the rcv list */
14967 	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
14968 		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14969 		list_del_init(&dmabuf->hbuf.list);
14970 		list_for_each_entry_safe(d_buf, dnext,
14971 					 &dmabuf->dbuf.list, list) {
14972 			list_del_init(&d_buf->list);
14973 			lpfc_in_buf_free(vport->phba, d_buf);
14974 		}
14975 		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
14976 	}
14977 }
14978 
14979 /**
14980  * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
14981  * @vport: The vport that the received sequences were sent to.
14982  *
14983  * This function determines whether any received sequences have timed out by
14984  * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
14985  * indicates that there is at least one timed out sequence this routine will
14986  * go through the received sequences one at a time from most inactive to most
14987  * active to determine which ones need to be cleaned up. Once it has determined
14988  * that a sequence needs to be cleaned up it will simply free up the resources
14989  * without sending an abort.
14990  **/
14991 void
14992 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
14993 {
14994 	struct lpfc_dmabuf *h_buf, *hnext;
14995 	struct lpfc_dmabuf *d_buf, *dnext;
14996 	struct hbq_dmabuf *dmabuf = NULL;
14997 	unsigned long timeout;
14998 	int abort_count = 0;
14999 
15000 	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
15001 		   vport->rcv_buffer_time_stamp);
15002 	if (list_empty(&vport->rcv_buffer_list) ||
15003 	    time_before(jiffies, timeout))
15004 		return;
15005 	/* start with the oldest sequence on the rcv list */
15006 	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
15007 		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
15008 		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
15009 			   dmabuf->time_stamp);
15010 		if (time_before(jiffies, timeout))
15011 			break;
15012 		abort_count++;
15013 		list_del_init(&dmabuf->hbuf.list);
15014 		list_for_each_entry_safe(d_buf, dnext,
15015 					 &dmabuf->dbuf.list, list) {
15016 			list_del_init(&d_buf->list);
15017 			lpfc_in_buf_free(vport->phba, d_buf);
15018 		}
15019 		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
15020 	}
15021 	if (abort_count)
15022 		lpfc_update_rcv_time_stamp(vport);
15023 }
15024 
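/*
 * Illustrative sketch (not part of the driver): the timeout test used by
 * lpfc_rcv_seq_check_edtov() above. E_D_TOV is kept in milliseconds in
 * phba->fc_edtov, so each sequence's deadline is its receive time stamp plus
 * the converted interval:
 *
 *	timeout = msecs_to_jiffies(vport->phba->fc_edtov) + dmabuf->time_stamp;
 *	if (time_before(jiffies, timeout))
 *		return;		(sequence is still within E_D_TOV)
 *
 * time_before() handles jiffies wrap-around, which is why the comparison is
 * not written as a plain "<".
 */
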
15025 /**
15026  * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
15027  * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
15028  *
15029  * This function searches through the existing incomplete sequences that have
15030  * been sent to this @vport. If the frame matches one of the incomplete
15031  * sequences then the dbuf in the @dmabuf is added to the list of frames that
15032  * make up that sequence. If no sequence is found that matches this frame then
15033  * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list
15034  * This function returns a pointer to the first dmabuf in the sequence list that
15035  * the frame was linked to.
15036  **/
15037 static struct hbq_dmabuf *
15038 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
15039 {
15040 	struct fc_frame_header *new_hdr;
15041 	struct fc_frame_header *temp_hdr;
15042 	struct lpfc_dmabuf *d_buf;
15043 	struct lpfc_dmabuf *h_buf;
15044 	struct hbq_dmabuf *seq_dmabuf = NULL;
15045 	struct hbq_dmabuf *temp_dmabuf = NULL;
15046 	uint8_t	found = 0;
15047 
15048 	INIT_LIST_HEAD(&dmabuf->dbuf.list);
15049 	dmabuf->time_stamp = jiffies;
15050 	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
15051 
15052 	/* Use the hdr_buf to find the sequence that this frame belongs to */
15053 	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
15054 		temp_hdr = (struct fc_frame_header *)h_buf->virt;
15055 		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
15056 		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
15057 		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
15058 			continue;
15059 		/* found a pending sequence that matches this frame */
15060 		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
15061 		break;
15062 	}
15063 	if (!seq_dmabuf) {
15064 		/*
15065 		 * This indicates first frame received for this sequence.
15066 		 * Queue the buffer on the vport's rcv_buffer_list.
15067 		 */
15068 		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
15069 		lpfc_update_rcv_time_stamp(vport);
15070 		return dmabuf;
15071 	}
15072 	temp_hdr = seq_dmabuf->hbuf.virt;
15073 	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
15074 		be16_to_cpu(temp_hdr->fh_seq_cnt)) {
15075 		list_del_init(&seq_dmabuf->hbuf.list);
15076 		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
15077 		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
15078 		lpfc_update_rcv_time_stamp(vport);
15079 		return dmabuf;
15080 	}
15081 	/* move this sequence to the tail to indicate a young sequence */
15082 	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
15083 	seq_dmabuf->time_stamp = jiffies;
15084 	lpfc_update_rcv_time_stamp(vport);
15085 	if (list_empty(&seq_dmabuf->dbuf.list)) {
15086 		temp_hdr = dmabuf->hbuf.virt;
15087 		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
15088 		return seq_dmabuf;
15089 	}
15090 	/* find the correct place in the sequence to insert this frame */
15091 	d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
15092 	while (!found) {
15093 		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
15094 		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
15095 		/*
15096 		 * If the frame's sequence count is greater than the frame on
15097 		 * the list then insert the frame right after this frame
15098 		 */
15099 		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
15100 			be16_to_cpu(temp_hdr->fh_seq_cnt)) {
15101 			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
15102 			found = 1;
15103 			break;
15104 		}
15105 
15106 		if (&d_buf->list == &seq_dmabuf->dbuf.list)
15107 			break;
15108 		d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
15109 	}
15110 
15111 	if (found)
15112 		return seq_dmabuf;
15113 	return NULL;
15114 }
15115 
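/*
 * Illustrative sketch (not part of the driver): the match key used by
 * lpfc_fc_frame_add() above to decide whether a newly received frame belongs
 * to a pending sequence. Two headers describe the same sequence when the
 * SEQ_ID, OX_ID and 3-byte source ID all agree:
 *
 *	same = (temp_hdr->fh_seq_id == new_hdr->fh_seq_id) &&
 *	       (temp_hdr->fh_ox_id == new_hdr->fh_ox_id) &&
 *	       !memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3);
 *
 * Frames within a matching sequence are then kept ordered by their
 * big-endian fh_seq_cnt values.
 */
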
15116 /**
15117  * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
15118  * @vport: pointer to a virtual port
15119  * @dmabuf: pointer to a dmabuf that describes the FC sequence
15120  *
15121  * This function tries to abort the partially assembled sequence described
15122  * by the information from the basic abort @dmabuf. It checks whether such a
15123  * partially assembled sequence is held by the driver. If so, it frees up all
15124  * the frames from the partially assembled sequence.
15125  *
15126  * Return
15127  * true  -- if there is a matching partially assembled sequence present and
15128  *          all of its frames have been freed;
15129  * false -- if there is no matching partially assembled sequence present so
15130  *          nothing got aborted in the lower layer driver
15131  **/
15132 static bool
15133 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
15134 			    struct hbq_dmabuf *dmabuf)
15135 {
15136 	struct fc_frame_header *new_hdr;
15137 	struct fc_frame_header *temp_hdr;
15138 	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
15139 	struct hbq_dmabuf *seq_dmabuf = NULL;
15140 
15141 	/* Use the hdr_buf to find the sequence that matches this frame */
15142 	INIT_LIST_HEAD(&dmabuf->dbuf.list);
15143 	INIT_LIST_HEAD(&dmabuf->hbuf.list);
15144 	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
15145 	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
15146 		temp_hdr = (struct fc_frame_header *)h_buf->virt;
15147 		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
15148 		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
15149 		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
15150 			continue;
15151 		/* found a pending sequence that matches this frame */
15152 		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
15153 		break;
15154 	}
15155 
15156 	/* Free up all the frames from the partially assembled sequence */
15157 	if (seq_dmabuf) {
15158 		list_for_each_entry_safe(d_buf, n_buf,
15159 					 &seq_dmabuf->dbuf.list, list) {
15160 			list_del_init(&d_buf->list);
15161 			lpfc_in_buf_free(vport->phba, d_buf);
15162 		}
15163 		return true;
15164 	}
15165 	return false;
15166 }
15167 
15168 /**
15169  * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
15170  * @vport: pointer to a virtual port
15171  * @dmabuf: pointer to a dmabuf that describes the FC sequence
15172  *
15173  * This function tries to abort an assembled sequence at the upper level
15174  * protocol, described by the information from the basic abort @dmabuf. It
15175  * checks whether such a pending context exists at the upper level protocol.
15176  * If so, it shall clean up the pending context.
15177  *
15178  * Return
15179  * true  -- if there is matching pending context of the sequence cleaned
15180  *          at ulp;
15181  * false -- if there is no matching pending context of the sequence present
15182  *          at ulp.
15183  **/
15184 static bool
15185 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
15186 {
15187 	struct lpfc_hba *phba = vport->phba;
15188 	int handled;
15189 
15190 	/* Accepting abort at ulp with SLI4 only */
15191 	if (phba->sli_rev < LPFC_SLI_REV4)
15192 		return false;
15193 
15194 	/* Register all caring upper level protocols to attend abort */
15195 	handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
15196 	if (handled)
15197 		return true;
15198 
15199 	return false;
15200 }
15201 
15202 /**
15203  * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
15204  * @phba: Pointer to HBA context object.
15205  * @cmd_iocbq: pointer to the command iocbq structure.
15206  * @rsp_iocbq: pointer to the response iocbq structure.
15207  *
15208  * This function handles the sequence abort response iocb command complete
15209  * event. It properly releases the memory allocated to the sequence abort
15210  * accept iocb.
15211  **/
15212 static void
15213 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
15214 			     struct lpfc_iocbq *cmd_iocbq,
15215 			     struct lpfc_iocbq *rsp_iocbq)
15216 {
15217 	struct lpfc_nodelist *ndlp;
15218 
15219 	if (cmd_iocbq) {
15220 		ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
15221 		lpfc_nlp_put(ndlp);
15222 		lpfc_nlp_not_used(ndlp);
15223 		lpfc_sli_release_iocbq(phba, cmd_iocbq);
15224 	}
15225 
15226 	/* Failure means BLS ABORT RSP did not get delivered to remote node */
15227 	if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
15228 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15229 			"3154 BLS ABORT RSP failed, data:  x%x/x%x\n",
15230 			rsp_iocbq->iocb.ulpStatus,
15231 			rsp_iocbq->iocb.un.ulpWord[4]);
15232 }
15233 
15234 /**
15235  * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
15236  * @phba: Pointer to HBA context object.
15237  * @xri: xri id in transaction.
15238  *
15239  * This function validates that the xri maps to the known range of XRIs
15240  * allocated and used by the driver.
15241  **/
15242 uint16_t
15243 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
15244 		      uint16_t xri)
15245 {
15246 	uint16_t i;
15247 
15248 	for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
15249 		if (xri == phba->sli4_hba.xri_ids[i])
15250 			return i;
15251 	}
15252 	return NO_XRI;
15253 }
15254 
15255 /**
15256  * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
15257  * @vport: Pointer to the vport on which the sequence abort was received.
15258  * @fc_hdr: pointer to a FC frame header.
15259  *
15260  * This function sends a basic response to a previous unsol sequence abort
15261  * event after aborting the sequence handling.
15262  **/
15263 static void
15264 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
15265 			struct fc_frame_header *fc_hdr, bool aborted)
15266 {
15267 	struct lpfc_hba *phba = vport->phba;
15268 	struct lpfc_iocbq *ctiocb = NULL;
15269 	struct lpfc_nodelist *ndlp;
15270 	uint16_t oxid, rxid, xri, lxri;
15271 	uint32_t sid, fctl;
15272 	IOCB_t *icmd;
15273 	int rc;
15274 
15275 	if (!lpfc_is_link_up(phba))
15276 		return;
15277 
15278 	sid = sli4_sid_from_fc_hdr(fc_hdr);
15279 	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
15280 	rxid = be16_to_cpu(fc_hdr->fh_rx_id);
15281 
15282 	ndlp = lpfc_findnode_did(vport, sid);
15283 	if (!ndlp) {
15284 		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
15285 		if (!ndlp) {
15286 			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
15287 					 "1268 Failed to allocate ndlp for "
15288 					 "oxid:x%x SID:x%x\n", oxid, sid);
15289 			return;
15290 		}
15291 		lpfc_nlp_init(vport, ndlp, sid);
15292 		/* Put ndlp onto pport node list */
15293 		lpfc_enqueue_node(vport, ndlp);
15294 	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
15295 		/* re-setup ndlp without removing from node list */
15296 		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
15297 		if (!ndlp) {
15298 			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
15299 					 "3275 Failed to activate ndlp found "
15300 					 "for oxid:x%x SID:x%x\n", oxid, sid);
15301 			return;
15302 		}
15303 	}
15304 
15305 	/* Allocate buffer for rsp iocb */
15306 	ctiocb = lpfc_sli_get_iocbq(phba);
15307 	if (!ctiocb)
15308 		return;
15309 
15310 	/* Extract the F_CTL field from FC_HDR */
15311 	fctl = sli4_fctl_from_fc_hdr(fc_hdr);
15312 
15313 	icmd = &ctiocb->iocb;
15314 	icmd->un.xseq64.bdl.bdeSize = 0;
15315 	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
15316 	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
15317 	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
15318 	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
15319 
15320 	/* Fill in the rest of iocb fields */
15321 	icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
15322 	icmd->ulpBdeCount = 0;
15323 	icmd->ulpLe = 1;
15324 	icmd->ulpClass = CLASS3;
15325 	icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
15326 	ctiocb->context1 = lpfc_nlp_get(ndlp);
15327 
15328 	ctiocb->iocb_cmpl = NULL;
15329 	ctiocb->vport = phba->pport;
15330 	ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
15331 	ctiocb->sli4_lxritag = NO_XRI;
15332 	ctiocb->sli4_xritag = NO_XRI;
15333 
15334 	if (fctl & FC_FC_EX_CTX)
15335 		/* Exchange responder sent the abort so we
15336 		 * own the oxid.
15337 		 */
15338 		xri = oxid;
15339 	else
15340 		xri = rxid;
15341 	lxri = lpfc_sli4_xri_inrange(phba, xri);
15342 	if (lxri != NO_XRI)
15343 		lpfc_set_rrq_active(phba, ndlp, lxri,
15344 			(xri == oxid) ? rxid : oxid, 0);
15345 	/* For BA_ABTS from exchange responder, if the logical xri with
15346 	 * the oxid maps to the FCP XRI range, the port no longer has
15347 	 * that exchange context, send a BLS_RJT. Override the IOCB for
15348 	 * a BA_RJT.
15349 	 */
15350 	if ((fctl & FC_FC_EX_CTX) &&
15351 	    (lxri > lpfc_sli4_get_els_iocb_cnt(phba))) {
15352 		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
15353 		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
15354 		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
15355 		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
15356 	}
15357 
15358 	/* If BA_ABTS failed to abort a partially assembled receive sequence,
15359 	 * the driver no longer has that exchange, send a BLS_RJT. Override
15360 	 * the IOCB for a BA_RJT.
15361 	 */
15362 	if (aborted == false) {
15363 		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
15364 		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
15365 		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
15366 		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
15367 	}
15368 
15369 	if (fctl & FC_FC_EX_CTX) {
15370 		/* ABTS sent by responder to CT exchange, construction
15371 		 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
15372 		 * field and RX_ID from ABTS for RX_ID field.
15373 		 */
15374 		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
15375 	} else {
15376 		/* ABTS sent by initiator to CT exchange, construction
15377 		 * of BA_ACC will need to allocate a new XRI as for the
15378 		 * XRI_TAG field.
15379 		 */
15380 		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
15381 	}
15382 	bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
15383 	bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
15384 
15385 	/* Xmit CT abts response on exchange <xid> */
15386 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
15387 			 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
15388 			 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
15389 
15390 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
15391 	if (rc == IOCB_ERROR) {
15392 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
15393 				 "2925 Failed to issue CT ABTS RSP x%x on "
15394 				 "xri x%x, Data x%x\n",
15395 				 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
15396 				 phba->link_state);
15397 		lpfc_nlp_put(ndlp);
15398 		ctiocb->context1 = NULL;
15399 		lpfc_sli_release_iocbq(phba, ctiocb);
15400 	}
15401 }
15402 
15403 /**
15404  * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
15405  * @vport: Pointer to the vport on which this sequence was received
15406  * @dmabuf: pointer to a dmabuf that describes the FC sequence
15407  *
15408  * This function handles an SLI-4 unsolicited abort event. If the unsolicited
15409  * receive sequence is only partially assembled by the driver, it shall abort
15410  * the partially assembled frames for the sequence. Otherwise, if the
15411  * unsolicited receive sequence has been completely assembled and passed to
15412  * the Upper Layer Protocol (ULP), it then marks the per-oxid status to
15413  * indicate that the unsolicited sequence has been aborted. After that, it
15414  * will issue a basic accept or reject to respond to the abort.
15415  **/
15416 static void
15417 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
15418 			     struct hbq_dmabuf *dmabuf)
15419 {
15420 	struct lpfc_hba *phba = vport->phba;
15421 	struct fc_frame_header fc_hdr;
15422 	uint32_t fctl;
15423 	bool aborted;
15424 
15425 	/* Make a copy of fc_hdr before the dmabuf being released */
15426 	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
15427 	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
15428 
15429 	if (fctl & FC_FC_EX_CTX) {
15430 		/* ABTS by responder to exchange, no cleanup needed */
15431 		aborted = true;
15432 	} else {
15433 		/* ABTS by initiator to exchange, need to do cleanup */
15434 		aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
15435 		if (aborted == false)
15436 			aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
15437 	}
15438 	lpfc_in_buf_free(phba, &dmabuf->dbuf);
15439 
15440 	/* Respond with BA_ACC or BA_RJT accordingly */
15441 	lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
15442 }
15443 
15444 /**
15445  * lpfc_seq_complete - Indicates if a sequence is complete
15446  * @dmabuf: pointer to a dmabuf that describes the FC sequence
15447  *
15448  * This function checks the sequence, starting with the frame described by
15449  * @dmabuf, to see if all the frames associated with this sequence are present.
15450  * the frames associated with this sequence are linked to the @dmabuf using the
15451  * dbuf list. This function looks for three major things. 1) That the first frame
15452  * has a sequence count of zero. 2) There is a frame with the last-frame-of-sequence
15453  * bit set. 3) That there are no holes in the sequence count. The function will
15454  * return 1 when the sequence is complete, otherwise it will return 0.
15455  **/
15456 static int
15457 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
15458 {
15459 	struct fc_frame_header *hdr;
15460 	struct lpfc_dmabuf *d_buf;
15461 	struct hbq_dmabuf *seq_dmabuf;
15462 	uint32_t fctl;
15463 	int seq_count = 0;
15464 
15465 	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
15466 	/* make sure first frame of sequence has a sequence count of zero */
15467 	if (hdr->fh_seq_cnt != seq_count)
15468 		return 0;
15469 	fctl = (hdr->fh_f_ctl[0] << 16 |
15470 		hdr->fh_f_ctl[1] << 8 |
15471 		hdr->fh_f_ctl[2]);
15472 	/* If last frame of sequence we can return success. */
15473 	if (fctl & FC_FC_END_SEQ)
15474 		return 1;
15475 	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
15476 		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
15477 		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
15478 		/* If there is a hole in the sequence count then fail. */
15479 		if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
15480 			return 0;
15481 		fctl = (hdr->fh_f_ctl[0] << 16 |
15482 			hdr->fh_f_ctl[1] << 8 |
15483 			hdr->fh_f_ctl[2]);
15484 		/* If last frame of sequence we can return success. */
15485 		if (fctl & FC_FC_END_SEQ)
15486 			return 1;
15487 	}
15488 	return 0;
15489 }
15490 
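/*
 * Illustrative sketch (not part of the driver): the end-of-sequence test used
 * by lpfc_seq_complete() above. F_CTL is a 24-bit field carried as three
 * bytes; once assembled, the FC_FC_END_SEQ bit marks the last frame of the
 * sequence:
 *
 *	fctl = (hdr->fh_f_ctl[0] << 16) |
 *	       (hdr->fh_f_ctl[1] << 8)  |
 *	        hdr->fh_f_ctl[2];
 *	if (fctl & FC_FC_END_SEQ)
 *		return 1;	(sequence is complete)
 *
 * Combined with the zero starting fh_seq_cnt and the no-holes walk, this is
 * what lets the routine report a fully assembled sequence.
 */
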
15491 /**
15492  * lpfc_prep_seq - Prep sequence for ULP processing
15493  * @vport: Pointer to the vport on which this sequence was received
15494  * @dmabuf: pointer to a dmabuf that describes the FC sequence
15495  *
15496  * This function takes a sequence, described by a list of frames, and creates
15497  * a list of iocbq structures to describe the sequence. This iocbq list will be
15498  * used to issue to the generic unsolicited sequence handler. This routine
15499  * returns a pointer to the first iocbq in the list. If the function is unable
15500  * to allocate an iocbq then it throws out the received frames that could not
15501  * be described and returns a pointer to the first iocbq. If unable to
15502  * allocate any iocbqs (including the first) this function will return NULL.
15503  **/
15504 static struct lpfc_iocbq *
15505 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
15506 {
15507 	struct hbq_dmabuf *hbq_buf;
15508 	struct lpfc_dmabuf *d_buf, *n_buf;
15509 	struct lpfc_iocbq *first_iocbq, *iocbq;
15510 	struct fc_frame_header *fc_hdr;
15511 	uint32_t sid;
15512 	uint32_t len, tot_len;
15513 	struct ulp_bde64 *pbde;
15514 
15515 	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
15516 	/* remove from receive buffer list */
15517 	list_del_init(&seq_dmabuf->hbuf.list);
15518 	lpfc_update_rcv_time_stamp(vport);
15519 	/* get the Remote Port's SID */
15520 	sid = sli4_sid_from_fc_hdr(fc_hdr);
15521 	tot_len = 0;
15522 	/* Get an iocbq struct to fill in. */
15523 	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
15524 	if (first_iocbq) {
15525 		/* Initialize the first IOCB. */
15526 		first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
15527 		first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
15528 
15529 		/* Check FC Header to see what TYPE of frame we are rcv'ing */
15530 		if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
15531 			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
15532 			first_iocbq->iocb.un.rcvels.parmRo =
15533 				sli4_did_from_fc_hdr(fc_hdr);
15534 			first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
15535 		} else
15536 			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
15537 		first_iocbq->iocb.ulpContext = NO_XRI;
15538 		first_iocbq->iocb.unsli3.rcvsli3.ox_id =
15539 			be16_to_cpu(fc_hdr->fh_ox_id);
15540 		/* iocbq is prepped for internal consumption.  Physical vpi. */
15541 		first_iocbq->iocb.unsli3.rcvsli3.vpi =
15542 			vport->phba->vpi_ids[vport->vpi];
15543 		/* put the first buffer into the first IOCBq */
15544 		tot_len = bf_get(lpfc_rcqe_length,
15545 				       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
15546 
15547 		first_iocbq->context2 = &seq_dmabuf->dbuf;
15548 		first_iocbq->context3 = NULL;
15549 		first_iocbq->iocb.ulpBdeCount = 1;
15550 		if (tot_len > LPFC_DATA_BUF_SIZE)
15551 			first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
15552 							LPFC_DATA_BUF_SIZE;
15553 		else
15554 			first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
15555 
15556 		first_iocbq->iocb.un.rcvels.remoteID = sid;
15557 
15558 		first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
15559 	}
15560 	iocbq = first_iocbq;
15561 	/*
15562 	 * Each IOCBq can have two Buffers assigned, so go through the list
15563 	 * of buffers for this sequence and save two buffers in each IOCBq
15564 	 */
15565 	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
15566 		if (!iocbq) {
15567 			lpfc_in_buf_free(vport->phba, d_buf);
15568 			continue;
15569 		}
15570 		if (!iocbq->context3) {
15571 			iocbq->context3 = d_buf;
15572 			iocbq->iocb.ulpBdeCount++;
15573 			/* We need to get the size out of the right CQE */
15574 			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
15575 			len = bf_get(lpfc_rcqe_length,
15576 				       &hbq_buf->cq_event.cqe.rcqe_cmpl);
15577 			pbde = (struct ulp_bde64 *)
15578 					&iocbq->iocb.unsli3.sli3Words[4];
15579 			if (len > LPFC_DATA_BUF_SIZE)
15580 				pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
15581 			else
15582 				pbde->tus.f.bdeSize = len;
15583 
15584 			iocbq->iocb.unsli3.rcvsli3.acc_len += len;
15585 			tot_len += len;
15586 		} else {
15587 			iocbq = lpfc_sli_get_iocbq(vport->phba);
15588 			if (!iocbq) {
15589 				if (first_iocbq) {
15590 					first_iocbq->iocb.ulpStatus =
15591 							IOSTAT_FCP_RSP_ERROR;
15592 					first_iocbq->iocb.un.ulpWord[4] =
15593 							IOERR_NO_RESOURCES;
15594 				}
15595 				lpfc_in_buf_free(vport->phba, d_buf);
15596 				continue;
15597 			}
15598 			/* We need to get the size out of the right CQE */
15599 			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
15600 			len = bf_get(lpfc_rcqe_length,
15601 				       &hbq_buf->cq_event.cqe.rcqe_cmpl);
15602 			iocbq->context2 = d_buf;
15603 			iocbq->context3 = NULL;
15604 			iocbq->iocb.ulpBdeCount = 1;
15605 			if (len > LPFC_DATA_BUF_SIZE)
15606 				iocbq->iocb.un.cont64[0].tus.f.bdeSize =
15607 							LPFC_DATA_BUF_SIZE;
15608 			else
15609 				iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
15610 
15611 			tot_len += len;
15612 			iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
15613 
15614 			iocbq->iocb.un.rcvels.remoteID = sid;
15615 			list_add_tail(&iocbq->list, &first_iocbq->list);
15616 		}
15617 	}
15618 	return first_iocbq;
15619 }
15620 
15621 static void
15622 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
15623 			  struct hbq_dmabuf *seq_dmabuf)
15624 {
15625 	struct fc_frame_header *fc_hdr;
15626 	struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
15627 	struct lpfc_hba *phba = vport->phba;
15628 
15629 	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
15630 	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
15631 	if (!iocbq) {
15632 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15633 				"2707 Ring %d handler: Failed to allocate "
15634 				"iocb Rctl x%x Type x%x received\n",
15635 				LPFC_ELS_RING,
15636 				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
15637 		return;
15638 	}
15639 	if (!lpfc_complete_unsol_iocb(phba,
15640 				      &phba->sli.ring[LPFC_ELS_RING],
15641 				      iocbq, fc_hdr->fh_r_ctl,
15642 				      fc_hdr->fh_type))
15643 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15644 				"2540 Ring %d handler: unexpected Rctl "
15645 				"x%x Type x%x received\n",
15646 				LPFC_ELS_RING,
15647 				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
15648 
15649 	/* Free iocb created in lpfc_prep_seq */
15650 	list_for_each_entry_safe(curr_iocb, next_iocb,
15651 		&iocbq->list, list) {
15652 		list_del_init(&curr_iocb->list);
15653 		lpfc_sli_release_iocbq(phba, curr_iocb);
15654 	}
15655 	lpfc_sli_release_iocbq(phba, iocbq);
15656 }
15657 
15658 /**
15659  * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
 * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to a dmabuf that describes the received FC sequence.
 *
 * This function is called with no lock held. It processes all the received
 * buffers and gives them to the upper layers when a received buffer
 * indicates that it is the final frame in the sequence. The interrupt
 * service routine processes received buffers in interrupt context, adds the
 * received dma buffers to the rb_pend_list queue and signals the worker
 * thread. The worker thread calls lpfc_sli4_handle_received_buffer, which
 * calls the appropriate receive function when the final frame in a sequence
 * is received.
15669  **/
15670 void
15671 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
15672 				 struct hbq_dmabuf *dmabuf)
15673 {
15674 	struct hbq_dmabuf *seq_dmabuf;
15675 	struct fc_frame_header *fc_hdr;
15676 	struct lpfc_vport *vport;
15677 	uint32_t fcfi;
15678 	uint32_t did;
15679 
15680 	/* Process each received buffer */
15681 	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
15682 	/* check to see if this a valid type of frame */
15683 	if (lpfc_fc_frame_check(phba, fc_hdr)) {
15684 		lpfc_in_buf_free(phba, &dmabuf->dbuf);
15685 		return;
15686 	}
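	/*
	 * The FCF index lives in a different field depending on whether the
	 * port reported a RECEIVE or RECEIVE_V1 completion, so check the
	 * CQE code before extracting it.
	 */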
15687 	if ((bf_get(lpfc_cqe_code,
15688 		    &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
15689 		fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
15690 			      &dmabuf->cq_event.cqe.rcqe_cmpl);
15691 	else
15692 		fcfi = bf_get(lpfc_rcqe_fcf_id,
15693 			      &dmabuf->cq_event.cqe.rcqe_cmpl);
15694 
15695 	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
15696 	if (!vport) {
15697 		/* throw out the frame */
15698 		lpfc_in_buf_free(phba, &dmabuf->dbuf);
15699 		return;
15700 	}
15701 
15702 	/* d_id this frame is directed to */
15703 	did = sli4_did_from_fc_hdr(fc_hdr);
15704 
15705 	/* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
15706 	if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
15707 		(did != Fabric_DID)) {
15708 		/*
15709 		 * Throw out the frame if we are not pt2pt.
15710 		 * The pt2pt protocol allows for discovery frames
15711 		 * to be received without a registered VPI.
15712 		 */
15713 		if (!(vport->fc_flag & FC_PT2PT) ||
15714 			(phba->link_state == LPFC_HBA_READY)) {
15715 			lpfc_in_buf_free(phba, &dmabuf->dbuf);
15716 			return;
15717 		}
15718 	}
15719 
15720 	/* Handle the basic abort sequence (BA_ABTS) event */
15721 	if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
15722 		lpfc_sli4_handle_unsol_abort(vport, dmabuf);
15723 		return;
15724 	}
15725 
15726 	/* Link this frame */
15727 	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
15728 	if (!seq_dmabuf) {
15729 		/* unable to add frame to vport - throw it out */
15730 		lpfc_in_buf_free(phba, &dmabuf->dbuf);
15731 		return;
15732 	}
15733 	/* If not last frame in sequence continue processing frames. */
15734 	if (!lpfc_seq_complete(seq_dmabuf))
15735 		return;
15736 
15737 	/* Send the complete sequence to the upper layer protocol */
15738 	lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
15739 }
15740 
15741 /**
15742  * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
15743  * @phba: pointer to lpfc hba data structure.
15744  *
15745  * This routine is invoked to post rpi header templates to the
15746  * HBA consistent with the SLI-4 interface spec.  This routine
 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
 * 64 rpi context headers.
 *
 * This routine does not require any locks.  Its use is expected
 * to be at driver load or during reset recovery, when driver
 * execution is sequential.
15753  *
15754  * Return codes
15755  * 	0 - successful
15756  *      -EIO - The mailbox failed to complete successfully.
15757  * 	When this error occurs, the driver is not guaranteed
15758  *	to have any rpi regions posted to the device and
15759  *	must either attempt to repost the regions or take a
15760  *	fatal error.
15761  **/
15762 int
15763 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
15764 {
15765 	struct lpfc_rpi_hdr *rpi_page;
15766 	uint32_t rc = 0;
15767 	uint16_t lrpi = 0;
15768 
15769 	/* SLI4 ports that support extents do not require RPI headers. */
15770 	if (!phba->sli4_hba.rpi_hdrs_in_use)
15771 		goto exit;
15772 	if (phba->sli4_hba.extents_in_use)
15773 		return -EIO;
15774 
15775 	list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
15776 		/*
15777 		 * Assign the rpi headers a physical rpi only if the driver
15778 		 * has not initialized those resources.  A port reset only
15779 		 * needs the headers posted.
15780 		 */
15781 		if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
15782 		    LPFC_RPI_RSRC_RDY)
15783 			rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
15784 
15785 		rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
15786 		if (rc != MBX_SUCCESS) {
15787 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15788 					"2008 Error %d posting all rpi "
15789 					"headers\n", rc);
15790 			rc = -EIO;
15791 			break;
15792 		}
15793 	}
15794 
15795  exit:
15796 	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
15797 	       LPFC_RPI_RSRC_RDY);
15798 	return rc;
15799 }
15800 
15801 /**
15802  * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
15803  * @phba: pointer to lpfc hba data structure.
15804  * @rpi_page:  pointer to the rpi memory region.
15805  *
15806  * This routine is invoked to post a single rpi header to the
15807  * HBA consistent with the SLI-4 interface spec.  This memory region
15808  * maps up to 64 rpi context regions.
15809  *
15810  * Return codes
15811  * 	0 - successful
15812  * 	-ENOMEM - No available memory
15813  *      -EIO - The mailbox failed to complete successfully.
15814  **/
15815 int
15816 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
15817 {
15818 	LPFC_MBOXQ_t *mboxq;
15819 	struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
15820 	uint32_t rc = 0;
15821 	uint32_t shdr_status, shdr_add_status;
15822 	union lpfc_sli4_cfg_shdr *shdr;
15823 
15824 	/* SLI4 ports that support extents do not require RPI headers. */
15825 	if (!phba->sli4_hba.rpi_hdrs_in_use)
15826 		return rc;
15827 	if (phba->sli4_hba.extents_in_use)
15828 		return -EIO;
15829 
15830 	/* The port is notified of the header region via a mailbox command. */
15831 	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15832 	if (!mboxq) {
15833 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15834 				"2001 Unable to allocate memory for issuing "
15835 				"SLI_CONFIG_SPECIAL mailbox command\n");
15836 		return -ENOMEM;
15837 	}
15838 
	/* Post this rpi header memory region to the port. */
15840 	hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
15841 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
15842 			 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
15843 			 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
15844 			 sizeof(struct lpfc_sli4_cfg_mhdr),
15845 			 LPFC_SLI4_MBX_EMBED);
15846 
15847 
15848 	/* Post the physical rpi to the port for this rpi header. */
15849 	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
15850 	       rpi_page->start_rpi);
15851 	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
15852 	       hdr_tmpl, rpi_page->page_count);
15853 
15854 	hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
15855 	hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
15856 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
15857 	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
15858 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15859 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
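	/*
	 * Do not free the mailbox on MBX_TIMEOUT; the port may still
	 * complete it later and will reference this memory.
	 */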
15860 	if (rc != MBX_TIMEOUT)
15861 		mempool_free(mboxq, phba->mbox_mem_pool);
15862 	if (shdr_status || shdr_add_status || rc) {
15863 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15864 				"2514 POST_RPI_HDR mailbox failed with "
15865 				"status x%x add_status x%x, mbx status x%x\n",
15866 				shdr_status, shdr_add_status, rc);
15867 		rc = -ENXIO;
15868 	}
15869 	return rc;
15870 }
15871 
15872 /**
15873  * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
15874  * @phba: pointer to lpfc hba data structure.
15875  *
 * This routine is invoked to allocate the next available rpi from the
 * driver's rpi bitmask.  If the driver is running low on rpi resources
 * and the port requires rpi headers, another rpi header page is posted
 * to the port.
15880  *
15881  * Returns
15882  * 	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
15883  * 	LPFC_RPI_ALLOC_ERROR if no rpis are available.
15884  **/
15885 int
15886 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
15887 {
15888 	unsigned long rpi;
15889 	uint16_t max_rpi, rpi_limit;
15890 	uint16_t rpi_remaining, lrpi = 0;
15891 	struct lpfc_rpi_hdr *rpi_hdr;
15892 	unsigned long iflag;
15893 
15894 	/*
15895 	 * Fetch the next logical rpi.  Because this index is logical,
15896 	 * the  driver starts at 0 each time.
15897 	 */
15898 	spin_lock_irqsave(&phba->hbalock, iflag);
15899 	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
15900 	rpi_limit = phba->sli4_hba.next_rpi;
15901 
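	/*
	 * Search the bitmask for the first free logical rpi; rpi_limit
	 * (next_rpi) bounds the rpis the driver has made available so far.
	 */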
15902 	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
15903 	if (rpi >= rpi_limit)
15904 		rpi = LPFC_RPI_ALLOC_ERROR;
15905 	else {
15906 		set_bit(rpi, phba->sli4_hba.rpi_bmask);
15907 		phba->sli4_hba.max_cfg_param.rpi_used++;
15908 		phba->sli4_hba.rpi_count++;
15909 	}
15910 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
15911 			"0001 rpi:%x max:%x lim:%x\n",
15912 			(int) rpi, max_rpi, rpi_limit);
15913 
15914 	/*
15915 	 * Don't try to allocate more rpi header regions if the device limit
15916 	 * has been exhausted.
15917 	 */
15918 	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
15919 	    (phba->sli4_hba.rpi_count >= max_rpi)) {
15920 		spin_unlock_irqrestore(&phba->hbalock, iflag);
15921 		return rpi;
15922 	}
15923 
15924 	/*
15925 	 * RPI header postings are not required for SLI4 ports capable of
15926 	 * extents.
15927 	 */
15928 	if (!phba->sli4_hba.rpi_hdrs_in_use) {
15929 		spin_unlock_irqrestore(&phba->hbalock, iflag);
15930 		return rpi;
15931 	}
15932 
15933 	/*
15934 	 * If the driver is running low on rpi resources, allocate another
15935 	 * page now.  Note that the next_rpi value is used because
15936 	 * it represents how many are actually in use whereas max_rpi notes
15937 	 * how many are supported max by the device.
15938 	 */
15939 	rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
15940 	spin_unlock_irqrestore(&phba->hbalock, iflag);
15941 	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
15942 		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
15943 		if (!rpi_hdr) {
15944 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15945 					"2002 Error Could not grow rpi "
15946 					"count\n");
15947 		} else {
15948 			lrpi = rpi_hdr->start_rpi;
15949 			rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
15950 			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
15951 		}
15952 	}
15953 
15954 	return rpi;
15955 }
15956 
15957 /**
 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to release.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.  The caller is expected
 * to hold the hbalock.
15963  **/
15964 static void
15965 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
15966 {
15967 	if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
15968 		phba->sli4_hba.rpi_count--;
15969 		phba->sli4_hba.max_cfg_param.rpi_used--;
15970 	}
15971 }
15972 
15973 /**
15974  * lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to release.
15976  *
15977  * This routine is invoked to release an rpi to the pool of
15978  * available rpis maintained by the driver.
15979  **/
15980 void
15981 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
15982 {
15983 	spin_lock_irq(&phba->hbalock);
15984 	__lpfc_sli4_free_rpi(phba, rpi);
15985 	spin_unlock_irq(&phba->hbalock);
15986 }
15987 
15988 /**
15989  * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
15990  * @phba: pointer to lpfc hba data structure.
15991  *
 * This routine is invoked to free the memory that tracks rpi
 * allocations via the rpi bitmask and rpi id array.
15994  **/
15995 void
15996 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
15997 {
15998 	kfree(phba->sli4_hba.rpi_bmask);
15999 	kfree(phba->sli4_hba.rpi_ids);
16000 	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
16001 }
16002 
16003 /**
 * lpfc_sli4_resume_rpi - Resume an rpi on the port
 * @ndlp: pointer to the node whose rpi is to be resumed.
 * @cmpl: optional completion handler for the RESUME_RPI mailbox command.
 * @arg: argument passed to the completion handler via mboxq->context1.
 *
 * This routine is invoked to issue a RESUME_RPI mailbox command for the
 * rpi associated with @ndlp.  Returns 0 on success, -ENOMEM or -EIO on
 * failure.
16009  **/
16010 int
16011 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
16012 	void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
16013 {
16014 	LPFC_MBOXQ_t *mboxq;
16015 	struct lpfc_hba *phba = ndlp->phba;
16016 	int rc;
16017 
	/* The rpi is resumed via a RESUME_RPI mailbox command. */
16019 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16020 	if (!mboxq)
16021 		return -ENOMEM;
16022 
	/* Construct the RESUME_RPI mailbox command for this node. */
16024 	lpfc_resume_rpi(mboxq, ndlp);
16025 	if (cmpl) {
16026 		mboxq->mbox_cmpl = cmpl;
16027 		mboxq->context1 = arg;
16028 		mboxq->context2 = ndlp;
16029 	} else
16030 		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16031 	mboxq->vport = ndlp->vport;
16032 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
16033 	if (rc == MBX_NOT_FINISHED) {
16034 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16035 				"2010 Resume RPI Mailbox failed "
16036 				"status %d, mbxStatus x%x\n", rc,
16037 				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
16038 		mempool_free(mboxq, phba->mbox_mem_pool);
16039 		return -EIO;
16040 	}
16041 	return 0;
16042 }
16043 
16044 /**
16045  * lpfc_sli4_init_vpi - Initialize a vpi with the port
16046  * @vport: Pointer to the vport for which the vpi is being initialized
16047  *
16048  * This routine is invoked to activate a vpi with the port.
16049  *
16050  * Returns:
16051  *    0 success
16052  *    -Evalue otherwise
16053  **/
16054 int
16055 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
16056 {
16057 	LPFC_MBOXQ_t *mboxq;
16058 	int rc = 0;
16059 	int retval = MBX_SUCCESS;
16060 	uint32_t mbox_tmo;
16061 	struct lpfc_hba *phba = vport->phba;
16062 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16063 	if (!mboxq)
16064 		return -ENOMEM;
16065 	lpfc_init_vpi(phba, mboxq, vport->vpi);
16066 	mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
16067 	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
16068 	if (rc != MBX_SUCCESS) {
16069 		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
16070 				"2022 INIT VPI Mailbox failed "
16071 				"status %d, mbxStatus x%x\n", rc,
16072 				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
16073 		retval = -EIO;
16074 	}
16075 	if (rc != MBX_TIMEOUT)
16076 		mempool_free(mboxq, vport->phba->mbox_mem_pool);
16077 
16078 	return retval;
16079 }
16080 
16081 /**
16082  * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
16083  * @phba: pointer to lpfc hba data structure.
16084  * @mboxq: Pointer to mailbox object.
16085  *
 * This routine is the completion handler for the nonembedded ADD_FCF_RECORD
 * mailbox command.  It checks the status in the mailbox subheader, logs any
 * failure other than STATUS_FCF_IN_USE, and frees the mailbox resources.
16089  **/
16090 static void
16091 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
16092 {
16093 	void *virt_addr;
16094 	union lpfc_sli4_cfg_shdr *shdr;
16095 	uint32_t shdr_status, shdr_add_status;
16096 
16097 	virt_addr = mboxq->sge_array->addr[0];
16098 	/* The IOCTL status is embedded in the mailbox subheader. */
16099 	shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
16100 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16101 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16102 
16103 	if ((shdr_status || shdr_add_status) &&
16104 		(shdr_status != STATUS_FCF_IN_USE))
16105 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16106 			"2558 ADD_FCF_RECORD mailbox failed with "
16107 			"status x%x add_status x%x\n",
16108 			shdr_status, shdr_add_status);
16109 
16110 	lpfc_sli4_mbox_cmd_free(phba, mboxq);
16111 }
16112 
16113 /**
16114  * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
16115  * @phba: pointer to lpfc hba data structure.
16116  * @fcf_record:  pointer to the initialized fcf record to add.
16117  *
16118  * This routine is invoked to manually add a single FCF record. The caller
16119  * must pass a completely initialized FCF_Record.  This routine takes
16120  * care of the nonembedded mailbox operations.
16121  **/
16122 int
16123 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
16124 {
16125 	int rc = 0;
16126 	LPFC_MBOXQ_t *mboxq;
16127 	uint8_t *bytep;
16128 	void *virt_addr;
16129 	struct lpfc_mbx_sge sge;
16130 	uint32_t alloc_len, req_len;
16131 	uint32_t fcfindex;
16132 
16133 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16134 	if (!mboxq) {
16135 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16136 			"2009 Failed to allocate mbox for ADD_FCF cmd\n");
16137 		return -ENOMEM;
16138 	}
16139 
16140 	req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
16141 		  sizeof(uint32_t);
16142 
16143 	/* Allocate DMA memory and set up the non-embedded mailbox command */
16144 	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
16145 				     LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
16146 				     req_len, LPFC_SLI4_MBX_NEMBED);
16147 	if (alloc_len < req_len) {
16148 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16149 			"2523 Allocated DMA memory size (x%x) is "
16150 			"less than the requested DMA memory "
16151 			"size (x%x)\n", alloc_len, req_len);
16152 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
16153 		return -ENOMEM;
16154 	}
16155 
16156 	/*
16157 	 * Get the first SGE entry from the non-embedded DMA memory.  This
16158 	 * routine only uses a single SGE.
16159 	 */
16160 	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
16161 	virt_addr = mboxq->sge_array->addr[0];
16162 	/*
16163 	 * Configure the FCF record for FCFI 0.  This is the driver's
16164 	 * hardcoded default and gets used in nonFIP mode.
16165 	 */
16166 	fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
16167 	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
16168 	lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
16169 
16170 	/*
16171 	 * Copy the fcf_index and the FCF Record Data. The data starts after
16172 	 * the FCoE header plus word10. The data copy needs to be endian
16173 	 * correct.
16174 	 */
16175 	bytep += sizeof(uint32_t);
16176 	lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
16177 	mboxq->vport = phba->pport;
16178 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
16179 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
16180 	if (rc == MBX_NOT_FINISHED) {
16181 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16182 			"2515 ADD_FCF_RECORD mailbox failed with "
16183 			"status 0x%x\n", rc);
16184 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
16185 		rc = -EIO;
16186 	} else
16187 		rc = 0;
16188 
16189 	return rc;
16190 }
16191 
16192 /**
16193  * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
16194  * @phba: pointer to lpfc hba data structure.
16195  * @fcf_record:  pointer to the fcf record to write the default data.
16196  * @fcf_index: FCF table entry index.
16197  *
16198  * This routine is invoked to build the driver's default FCF record.  The
16199  * values used are hardcoded.  This routine handles memory initialization.
16200  *
16201  **/
16202 void
16203 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
16204 				struct fcf_record *fcf_record,
16205 				uint16_t fcf_index)
16206 {
16207 	memset(fcf_record, 0, sizeof(struct fcf_record));
16208 	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
16209 	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
16210 	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
16211 	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
16212 	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
16213 	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
16214 	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
16215 	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
16216 	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
16217 	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
16218 	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
16219 	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
16220 	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
16221 	bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
16222 	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
16223 	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
16224 		LPFC_FCF_FPMA | LPFC_FCF_SPMA);
16225 	/* Set the VLAN bit map */
16226 	if (phba->valid_vlan) {
16227 		fcf_record->vlan_bitmap[phba->vlan_id / 8]
16228 			= 1 << (phba->vlan_id % 8);
16229 	}
16230 }
16231 
16232 /**
16233  * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
16234  * @phba: pointer to lpfc hba data structure.
16235  * @fcf_index: FCF table entry offset.
16236  *
16237  * This routine is invoked to scan the entire FCF table by reading FCF
16238  * record and processing it one at a time starting from the @fcf_index
16239  * for initial FCF discovery or fast FCF failover rediscovery.
16240  *
 * Return 0 if the mailbox command is submitted successfully, non-zero
 * otherwise.
16243  **/
16244 int
16245 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
16246 {
16247 	int rc = 0, error;
16248 	LPFC_MBOXQ_t *mboxq;
16249 
16250 	phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
16251 	phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
16252 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16253 	if (!mboxq) {
16254 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16255 				"2000 Failed to allocate mbox for "
16256 				"READ_FCF cmd\n");
16257 		error = -ENOMEM;
16258 		goto fail_fcf_scan;
16259 	}
16260 	/* Construct the read FCF record mailbox command */
16261 	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
16262 	if (rc) {
16263 		error = -EINVAL;
16264 		goto fail_fcf_scan;
16265 	}
16266 	/* Issue the mailbox command asynchronously */
16267 	mboxq->vport = phba->pport;
16268 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
16269 
16270 	spin_lock_irq(&phba->hbalock);
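	/* Mark an FCF table scan in progress before issuing the mailbox. */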
16271 	phba->hba_flag |= FCF_TS_INPROG;
16272 	spin_unlock_irq(&phba->hbalock);
16273 
16274 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
16275 	if (rc == MBX_NOT_FINISHED)
16276 		error = -EIO;
16277 	else {
16278 		/* Reset eligible FCF count for new scan */
16279 		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
16280 			phba->fcf.eligible_fcf_cnt = 0;
16281 		error = 0;
16282 	}
16283 fail_fcf_scan:
16284 	if (error) {
16285 		if (mboxq)
16286 			lpfc_sli4_mbox_cmd_free(phba, mboxq);
16287 		/* FCF scan failed, clear FCF_TS_INPROG flag */
16288 		spin_lock_irq(&phba->hbalock);
16289 		phba->hba_flag &= ~FCF_TS_INPROG;
16290 		spin_unlock_irq(&phba->hbalock);
16291 	}
16292 	return error;
16293 }
16294 
16295 /**
16296  * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
16297  * @phba: pointer to lpfc hba data structure.
16298  * @fcf_index: FCF table entry offset.
16299  *
16300  * This routine is invoked to read an FCF record indicated by @fcf_index
16301  * and to use it for FLOGI roundrobin FCF failover.
16302  *
 * Return 0 if the mailbox command is submitted successfully, non-zero
 * otherwise.
16305  **/
16306 int
16307 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
16308 {
16309 	int rc = 0, error;
16310 	LPFC_MBOXQ_t *mboxq;
16311 
16312 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16313 	if (!mboxq) {
16314 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
16315 				"2763 Failed to allocate mbox for "
16316 				"READ_FCF cmd\n");
16317 		error = -ENOMEM;
16318 		goto fail_fcf_read;
16319 	}
16320 	/* Construct the read FCF record mailbox command */
16321 	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
16322 	if (rc) {
16323 		error = -EINVAL;
16324 		goto fail_fcf_read;
16325 	}
16326 	/* Issue the mailbox command asynchronously */
16327 	mboxq->vport = phba->pport;
16328 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
16329 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
16330 	if (rc == MBX_NOT_FINISHED)
16331 		error = -EIO;
16332 	else
16333 		error = 0;
16334 
16335 fail_fcf_read:
16336 	if (error && mboxq)
16337 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
16338 	return error;
16339 }
16340 
16341 /**
16342  * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
16343  * @phba: pointer to lpfc hba data structure.
16344  * @fcf_index: FCF table entry offset.
16345  *
16346  * This routine is invoked to read an FCF record indicated by @fcf_index to
16347  * determine whether it's eligible for FLOGI roundrobin failover list.
16348  *
 * Return 0 if the mailbox command is submitted successfully, non-zero
 * otherwise.
16351  **/
16352 int
16353 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
16354 {
16355 	int rc = 0, error;
16356 	LPFC_MBOXQ_t *mboxq;
16357 
16358 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16359 	if (!mboxq) {
16360 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
16361 				"2758 Failed to allocate mbox for "
16362 				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_read;
16365 	}
16366 	/* Construct the read FCF record mailbox command */
16367 	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
16368 	if (rc) {
16369 		error = -EINVAL;
16370 		goto fail_fcf_read;
16371 	}
16372 	/* Issue the mailbox command asynchronously */
16373 	mboxq->vport = phba->pport;
16374 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
16375 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
16376 	if (rc == MBX_NOT_FINISHED)
16377 		error = -EIO;
16378 	else
16379 		error = 0;
16380 
16381 fail_fcf_read:
16382 	if (error && mboxq)
16383 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
16384 	return error;
16385 }
16386 
16387 /**
16388  * lpfc_check_next_fcf_pri_level
 * @phba: pointer to the lpfc_hba struct for this port.
 *
 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
 * routine when the rr_bmask is empty. The FCF indices are put into the
 * rr_bmask based on their priority level, starting from the highest priority
 * and working down to the lowest. The most likely FCF candidate will be in
 * the highest priority group. When this routine is called it searches the
 * fcf_pri list for the next lowest priority group and repopulates the
 * rr_bmask with only those fcf_indexes.
 *
 * Returns:
 * 1=success 0=failure
16399  **/
16400 static int
16401 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
16402 {
16403 	uint16_t next_fcf_pri;
16404 	uint16_t last_index;
16405 	struct lpfc_fcf_pri *fcf_pri;
16406 	int rc;
16407 	int ret = 0;
16408 
16409 	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
16410 			LPFC_SLI4_FCF_TBL_INDX_MAX);
16411 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
16412 			"3060 Last IDX %d\n", last_index);
16413 
16414 	/* Verify the priority list has 2 or more entries */
16415 	spin_lock_irq(&phba->hbalock);
16416 	if (list_empty(&phba->fcf.fcf_pri_list) ||
16417 	    list_is_singular(&phba->fcf.fcf_pri_list)) {
16418 		spin_unlock_irq(&phba->hbalock);
16419 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
16420 			"3061 Last IDX %d\n", last_index);
16421 		return 0; /* Empty rr list */
16422 	}
16423 	spin_unlock_irq(&phba->hbalock);
16424 
16425 	next_fcf_pri = 0;
16426 	/*
16427 	 * Clear the rr_bmask and set all of the bits that are at this
16428 	 * priority.
16429 	 */
16430 	memset(phba->fcf.fcf_rr_bmask, 0,
16431 			sizeof(*phba->fcf.fcf_rr_bmask));
16432 	spin_lock_irq(&phba->hbalock);
16433 	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
16434 		if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
16435 			continue;
16436 		/*
		 * The first priority that has not failed FLOGI
		 * will be the highest.
16439 		 */
16440 		if (!next_fcf_pri)
16441 			next_fcf_pri = fcf_pri->fcf_rec.priority;
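		/*
		 * Drop hbalock across the rr_bmask update and reacquire it
		 * before continuing the list walk.
		 */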
16442 		spin_unlock_irq(&phba->hbalock);
16443 		if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
16444 			rc = lpfc_sli4_fcf_rr_index_set(phba,
16445 						fcf_pri->fcf_rec.fcf_index);
16446 			if (rc)
16447 				return 0;
16448 		}
16449 		spin_lock_irq(&phba->hbalock);
16450 	}
16451 	/*
	 * If next_fcf_pri was not set above and the list is not empty, then
	 * FLOGI has failed on all of the entries.  Reset the FLOGI-failed
	 * flag and start over from the beginning of the list.
16455 	 */
16456 	if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
16457 		list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
16458 			fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
16459 			/*
			 * The first priority that has not failed FLOGI
			 * will be the highest.
16462 			 */
16463 			if (!next_fcf_pri)
16464 				next_fcf_pri = fcf_pri->fcf_rec.priority;
16465 			spin_unlock_irq(&phba->hbalock);
16466 			if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
16467 				rc = lpfc_sli4_fcf_rr_index_set(phba,
16468 						fcf_pri->fcf_rec.fcf_index);
16469 				if (rc)
16470 					return 0;
16471 			}
16472 			spin_lock_irq(&phba->hbalock);
16473 		}
16474 	} else
16475 		ret = 1;
16476 	spin_unlock_irq(&phba->hbalock);
16477 
16478 	return ret;
16479 }
16480 /**
16481  * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
16482  * @phba: pointer to lpfc hba data structure.
16483  *
16484  * This routine is to get the next eligible FCF record index in a round
16485  * robin fashion. If the next eligible FCF record index equals to the
16486  * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
16487  * shall be returned, otherwise, the next eligible FCF record's index
16488  * shall be returned.
16489  **/
16490 uint16_t
16491 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
16492 {
16493 	uint16_t next_fcf_index;
16494 
16495 initial_priority:
16496 	/* Search start from next bit of currently registered FCF index */
16497 	next_fcf_index = phba->fcf.current_rec.fcf_indx;
16498 
16499 next_priority:
16500 	/* Determine the next fcf index to check */
16501 	next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
16502 	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
16503 				       LPFC_SLI4_FCF_TBL_INDX_MAX,
16504 				       next_fcf_index);
16505 
16506 	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
16507 	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
16508 		/*
16509 		 * If we have wrapped then we need to clear the bits that
16510 		 * have been tested so that we can detect when we should
16511 		 * change the priority level.
16512 		 */
16513 		next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
16514 					       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
16515 	}
16516 
16517 
16518 	/* Check roundrobin failover list empty condition */
16519 	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
16520 		next_fcf_index == phba->fcf.current_rec.fcf_indx) {
16521 		/*
		 * If the next fcf index is not found, check if there are
		 * lower priority level fcf's in the fcf_pri list.
		 * Set up the rr_bmask with all of the available fcf bits
		 * at that level and continue the selection process.
16526 		 */
16527 		if (lpfc_check_next_fcf_pri_level(phba))
16528 			goto initial_priority;
16529 		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
16530 				"2844 No roundrobin failover FCF available\n");
16531 		if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
16532 			return LPFC_FCOE_FCF_NEXT_NONE;
16533 		else {
16534 			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
16535 				"3063 Only FCF available idx %d, flag %x\n",
16536 				next_fcf_index,
16537 			phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
16538 			return next_fcf_index;
16539 		}
16540 	}
16541 
16542 	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
16543 		phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
16544 		LPFC_FCF_FLOGI_FAILED) {
16545 		if (list_is_singular(&phba->fcf.fcf_pri_list))
16546 			return LPFC_FCOE_FCF_NEXT_NONE;
16547 
16548 		goto next_priority;
16549 	}
16550 
16551 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
16552 			"2845 Get next roundrobin failover FCF (x%x)\n",
16553 			next_fcf_index);
16554 
16555 	return next_fcf_index;
16556 }
16557 
16558 /**
16559  * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index into the FCF table to set in the rr_bmask.
 *
 * This routine sets the FCF record index into the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before setting the bit.
 *
 * Returns 0 if the index bit is successfully set; otherwise it returns
 * -EINVAL.
16569  **/
16570 int
16571 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
16572 {
16573 	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
16574 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
16575 				"2610 FCF (x%x) reached driver's book "
16576 				"keeping dimension:x%x\n",
16577 				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
16578 		return -EINVAL;
16579 	}
16580 	/* Set the eligible FCF record index bmask */
16581 	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
16582 
16583 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
16584 			"2790 Set FCF (x%x) to roundrobin FCF failover "
16585 			"bmask\n", fcf_index);
16586 
16587 	return 0;
16588 }
16589 
16590 /**
16591  * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index into the FCF table to clear from the rr_bmask.
16593  *
16594  * This routine clears the FCF record index from the eligible bmask for
16595  * roundrobin failover search. It checks to make sure that the index
16596  * does not go beyond the range of the driver allocated bmask dimension
16597  * before clearing the bit.
16598  **/
16599 void
16600 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
16601 {
16602 	struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
16603 	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
16604 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
16605 				"2762 FCF (x%x) reached driver's book "
16606 				"keeping dimension:x%x\n",
16607 				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
16608 		return;
16609 	}
16610 	/* Clear the eligible FCF record index bmask */
16611 	spin_lock_irq(&phba->hbalock);
16612 	list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
16613 				 list) {
16614 		if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
16615 			list_del_init(&fcf_pri->list);
16616 			break;
16617 		}
16618 	}
16619 	spin_unlock_irq(&phba->hbalock);
16620 	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
16621 
16622 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
16623 			"2791 Clear FCF (x%x) from roundrobin failover "
16624 			"bmask\n", fcf_index);
16625 }
16626 
16627 /**
16628  * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to the rediscover FCF table mailbox object.
16630  *
16631  * This routine is the completion routine for the rediscover FCF table mailbox
16632  * command. If the mailbox command returned failure, it will try to stop the
16633  * FCF rediscover wait timer.
16634  **/
16635 static void
16636 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
16637 {
16638 	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
16639 	uint32_t shdr_status, shdr_add_status;
16640 
16641 	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
16642 
16643 	shdr_status = bf_get(lpfc_mbox_hdr_status,
16644 			     &redisc_fcf->header.cfg_shdr.response);
16645 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
16646 			     &redisc_fcf->header.cfg_shdr.response);
16647 	if (shdr_status || shdr_add_status) {
16648 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
16649 				"2746 Requesting for FCF rediscovery failed "
16650 				"status x%x add_status x%x\n",
16651 				shdr_status, shdr_add_status);
16652 		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
16653 			spin_lock_irq(&phba->hbalock);
16654 			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
16655 			spin_unlock_irq(&phba->hbalock);
16656 			/*
16657 			 * CVL event triggered FCF rediscover request failed,
16658 			 * last resort to re-try current registered FCF entry.
16659 			 */
16660 			lpfc_retry_pport_discovery(phba);
16661 		} else {
16662 			spin_lock_irq(&phba->hbalock);
16663 			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
16664 			spin_unlock_irq(&phba->hbalock);
16665 			/*
16666 			 * DEAD FCF event triggered FCF rediscover request
16667 			 * failed, last resort to fail over as a link down
16668 			 * to FCF registration.
16669 			 */
16670 			lpfc_sli4_fcf_dead_failthrough(phba);
16671 		}
16672 	} else {
16673 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
16674 				"2775 Start FCF rediscover quiescent timer\n");
16675 		/*
16676 		 * Start FCF rediscovery wait timer for pending FCF
16677 		 * before rescan FCF record table.
16678 		 */
16679 		lpfc_fcf_redisc_wait_start_timer(phba);
16680 	}
16681 
16682 	mempool_free(mbox, phba->mbox_mem_pool);
16683 }
16684 
16685 /**
16686  * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
16687  * @phba: pointer to lpfc hba data structure.
16688  *
16689  * This routine is invoked to request for rediscovery of the entire FCF table
16690  * by the port.
16691  **/
16692 int
16693 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
16694 {
16695 	LPFC_MBOXQ_t *mbox;
16696 	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
16697 	int rc, length;
16698 
16699 	/* Cancel retry delay timers to all vports before FCF rediscover */
16700 	lpfc_cancel_all_vport_retry_delay_timer(phba);
16701 
16702 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16703 	if (!mbox) {
16704 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16705 				"2745 Failed to allocate mbox for "
16706 				"requesting FCF rediscover.\n");
16707 		return -ENOMEM;
16708 	}
16709 
16710 	length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
16711 		  sizeof(struct lpfc_sli4_cfg_mhdr));
16712 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16713 			 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
16714 			 length, LPFC_SLI4_MBX_EMBED);
16715 
16716 	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
16717 	/* Set count to 0 for invalidating the entire FCF database */
16718 	bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
16719 
16720 	/* Issue the mailbox command asynchronously */
16721 	mbox->vport = phba->pport;
16722 	mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
16723 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
16724 
16725 	if (rc == MBX_NOT_FINISHED) {
16726 		mempool_free(mbox, phba->mbox_mem_pool);
16727 		return -EIO;
16728 	}
16729 	return 0;
16730 }
16731 
16732 /**
16733  * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
16734  * @phba: pointer to lpfc hba data structure.
16735  *
16736  * This function is the failover routine as a last resort to the FCF DEAD
16737  * event when driver failed to perform fast FCF failover.
16738  **/
16739 void
16740 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
16741 {
16742 	uint32_t link_state;
16743 
16744 	/*
16745 	 * Last resort as FCF DEAD event failover will treat this as
16746 	 * a link down, but save the link state because we don't want
16747 	 * it to be changed to Link Down unless it is already down.
16748 	 */
16749 	link_state = phba->link_state;
16750 	lpfc_linkdown(phba);
16751 	phba->link_state = link_state;
16752 
16753 	/* Unregister FCF if no devices connected to it */
16754 	lpfc_unregister_unused_fcf(phba);
16755 }
16756 
16757 /**
16758  * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
16759  * @phba: pointer to lpfc hba data structure.
16760  * @rgn23_data: pointer to configure region 23 data.
16761  *
 * This function gets the SLI3 port config region 23 data through the memory
 * dump mailbox command. When it successfully retrieves the data, the size of
 * the data is returned; otherwise, 0 is returned.
16765  **/
16766 static uint32_t
16767 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
16768 {
16769 	LPFC_MBOXQ_t *pmb = NULL;
16770 	MAILBOX_t *mb;
16771 	uint32_t offset = 0;
16772 	int rc;
16773 
16774 	if (!rgn23_data)
16775 		return 0;
16776 
16777 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16778 	if (!pmb) {
16779 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16780 				"2600 failed to allocate mailbox memory\n");
16781 		return 0;
16782 	}
16783 	mb = &pmb->u.mb;
16784 
16785 	do {
16786 		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
16787 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
16788 
16789 		if (rc != MBX_SUCCESS) {
16790 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16791 					"2601 failed to read config "
16792 					"region 23, rc 0x%x Status 0x%x\n",
16793 					rc, mb->mbxStatus);
16794 			mb->un.varDmp.word_cnt = 0;
16795 		}
16796 		/*
		 * Dump mem may return a zero word count when finished, or we
		 * got a mailbox error; either way we are done.
16799 		 */
16800 		if (mb->un.varDmp.word_cnt == 0)
16801 			break;
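		/* Clamp the count so we never copy past the end of rgn23_data. */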
16802 		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
16803 			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
16804 
16805 		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
16806 				       rgn23_data + offset,
16807 				       mb->un.varDmp.word_cnt);
16808 		offset += mb->un.varDmp.word_cnt;
16809 	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
16810 
16811 	mempool_free(pmb, phba->mbox_mem_pool);
16812 	return offset;
16813 }
16814 
16815 /**
16816  * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
16817  * @phba: pointer to lpfc hba data structure.
16818  * @rgn23_data: pointer to configure region 23 data.
16819  *
 * This function gets the SLI4 port config region 23 data through the memory
 * dump mailbox command. When it successfully retrieves the data, the size of
 * the data is returned; otherwise, 0 is returned.
16823  **/
16824 static uint32_t
16825 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
16826 {
16827 	LPFC_MBOXQ_t *mboxq = NULL;
16828 	struct lpfc_dmabuf *mp = NULL;
16829 	struct lpfc_mqe *mqe;
16830 	uint32_t data_length = 0;
16831 	int rc;
16832 
16833 	if (!rgn23_data)
16834 		return 0;
16835 
16836 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16837 	if (!mboxq) {
16838 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16839 				"3105 failed to allocate mailbox memory\n");
16840 		return 0;
16841 	}
16842 
16843 	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
16844 		goto out;
16845 	mqe = &mboxq->u.mqe;
16846 	mp = (struct lpfc_dmabuf *) mboxq->context1;
16847 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
16848 	if (rc)
16849 		goto out;
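	/* Word 5 of the dump response carries the returned data length. */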
16850 	data_length = mqe->un.mb_words[5];
16851 	if (data_length == 0)
16852 		goto out;
16853 	if (data_length > DMP_RGN23_SIZE) {
16854 		data_length = 0;
16855 		goto out;
16856 	}
16857 	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
16858 out:
16859 	mempool_free(mboxq, phba->mbox_mem_pool);
16860 	if (mp) {
16861 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
16862 		kfree(mp);
16863 	}
16864 	return data_length;
16865 }
16866 
16867 /**
16868  * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
16869  * @phba: pointer to lpfc hba data structure.
16870  *
 * This function reads region 23 and parses the TLVs for port status to
 * decide if the user disabled the port. If the TLV indicates the
16873  * port is disabled, the hba_flag is set accordingly.
16874  **/
16875 void
16876 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
16877 {
16878 	uint8_t *rgn23_data = NULL;
16879 	uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
16880 	uint32_t offset = 0;
16881 
16882 	/* Get adapter Region 23 data */
16883 	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
16884 	if (!rgn23_data)
16885 		goto out;
16886 
16887 	if (phba->sli_rev < LPFC_SLI_REV4)
16888 		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
16889 	else {
16890 		if_type = bf_get(lpfc_sli_intf_if_type,
16891 				 &phba->sli4_hba.sli_intf);
16892 		if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
16893 			goto out;
16894 		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
16895 	}
16896 
16897 	if (!data_size)
16898 		goto out;
16899 
16900 	/* Check the region signature first */
16901 	if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
16902 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16903 			"2619 Config region 23 has bad signature\n");
		goto out;
16905 	}
16906 	offset += 4;
16907 
16908 	/* Check the data structure version */
16909 	if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
16910 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16911 			"2620 Config region 23 has bad version\n");
16912 		goto out;
16913 	}
16914 	offset += 4;
16915 
16916 	/* Parse TLV entries in the region */
16917 	while (offset < data_size) {
16918 		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
16919 			break;
16920 		/*
16921 		 * If the TLV is not driver specific TLV or driver id is
16922 		 * not linux driver id, skip the record.
16923 		 */
16924 		if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
16925 		    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
16926 		    (rgn23_data[offset + 3] != 0)) {
16927 			offset += rgn23_data[offset + 1] * 4 + 4;
16928 			continue;
16929 		}
16930 
16931 		/* Driver found a driver specific TLV in the config region */
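		/* The TLV length field counts 4-byte words; convert to bytes. */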
16932 		sub_tlv_len = rgn23_data[offset + 1] * 4;
16933 		offset += 4;
16934 		tlv_offset = 0;
16935 
16936 		/*
16937 		 * Search for configured port state sub-TLV.
16938 		 */
16939 		while ((offset < data_size) &&
16940 			(tlv_offset < sub_tlv_len)) {
16941 			if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
16942 				offset += 4;
16943 				tlv_offset += 4;
16944 				break;
16945 			}
16946 			if (rgn23_data[offset] != PORT_STE_TYPE) {
16947 				offset += rgn23_data[offset + 1] * 4 + 4;
16948 				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
16949 				continue;
16950 			}
16951 
16952 			/* This HBA contains PORT_STE configured */
16953 			if (!rgn23_data[offset + 2])
16954 				phba->hba_flag |= LINK_DISABLED;
16955 
16956 			goto out;
16957 		}
16958 	}
16959 
16960 out:
16961 	kfree(rgn23_data);
16962 	return;
16963 }
16964 
16965 /**
16966  * lpfc_wr_object - write an object to the firmware
16967  * @phba: HBA structure that indicates port to create a queue on.
16968  * @dmabuf_list: list of dmabufs to write to the port.
16969  * @size: the total byte value of the objects to write to the port.
16970  * @offset: the current offset to be used to start the transfer.
16971  *
16972  * This routine will create a wr_object mailbox command to send to the port.
 * The mailbox command will be constructed using the dma buffers described in
 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
 * BDEs as the embedded mailbox can support. The @offset variable will be
16976  * used to indicate the starting offset of the transfer and will also return
16977  * the offset after the write object mailbox has completed. @size is used to
16978  * determine the end of the object and whether the eof bit should be set.
16979  *
 * Returns 0 if successful, and @offset will contain the new offset to use
 * for the next write.
 * Returns a negative value for error cases.
16983  **/
16984 int
16985 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
16986 	       uint32_t size, uint32_t *offset)
16987 {
16988 	struct lpfc_mbx_wr_object *wr_object;
16989 	LPFC_MBOXQ_t *mbox;
16990 	int rc = 0, i = 0;
16991 	uint32_t shdr_status, shdr_add_status;
16992 	uint32_t mbox_tmo;
16993 	union lpfc_sli4_cfg_shdr *shdr;
16994 	struct lpfc_dmabuf *dmabuf;
16995 	uint32_t written = 0;
16996 
16997 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16998 	if (!mbox)
16999 		return -ENOMEM;
17000 
17001 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17002 			LPFC_MBOX_OPCODE_WRITE_OBJECT,
17003 			sizeof(struct lpfc_mbx_wr_object) -
17004 			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
17005 
17006 	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
17007 	wr_object->u.request.write_offset = *offset;
17008 	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
17009 	wr_object->u.request.object_name[0] =
17010 		cpu_to_le32(wr_object->u.request.object_name[0]);
17011 	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
17012 	list_for_each_entry(dmabuf, dmabuf_list, list) {
17013 		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
17014 			break;
17015 		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
17016 		wr_object->u.request.bde[i].addrHigh =
17017 			putPaddrHigh(dmabuf->phys);
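		/*
		 * The final BDE covers only the remaining bytes and marks
		 * end-of-file for the write object request.
		 */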
17018 		if (written + SLI4_PAGE_SIZE >= size) {
17019 			wr_object->u.request.bde[i].tus.f.bdeSize =
17020 				(size - written);
17021 			written += (size - written);
17022 			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
17023 		} else {
17024 			wr_object->u.request.bde[i].tus.f.bdeSize =
17025 				SLI4_PAGE_SIZE;
17026 			written += SLI4_PAGE_SIZE;
17027 		}
17028 		i++;
17029 	}
17030 	wr_object->u.request.bde_count = i;
17031 	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
17032 	if (!phba->sli4_hba.intr_enable)
17033 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17034 	else {
17035 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17036 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17037 	}
17038 	/* The IOCTL status is embedded in the mailbox subheader. */
17039 	shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
17040 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17041 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17042 	if (rc != MBX_TIMEOUT)
17043 		mempool_free(mbox, phba->mbox_mem_pool);
17044 	if (shdr_status || shdr_add_status || rc) {
17045 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17046 				"3025 Write Object mailbox failed with "
17047 				"status x%x add_status x%x, mbx status x%x\n",
17048 				shdr_status, shdr_add_status, rc);
17049 		rc = -ENXIO;
17050 	} else
17051 		*offset += wr_object->u.response.actual_write_length;
17052 	return rc;
17053 }
17054 
17055 /**
17056  * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
17057  * @vport: pointer to vport data structure.
17058  *
 * This function iterates through the mailboxq and cleans up all REG_LOGIN
 * and REG_VPI mailbox commands associated with the vport. This function
 * is called when the driver wants to restart discovery of the vport due to
17062  * a Clear Virtual Link event.
17063  **/
17064 void
17065 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
17066 {
17067 	struct lpfc_hba *phba = vport->phba;
17068 	LPFC_MBOXQ_t *mb, *nextmb;
17069 	struct lpfc_dmabuf *mp;
17070 	struct lpfc_nodelist *ndlp;
17071 	struct lpfc_nodelist *act_mbx_ndlp = NULL;
17072 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
17073 	LIST_HEAD(mbox_cmd_list);
17074 	uint8_t restart_loop;
17075 
17076 	/* Clean up internally queued mailbox commands with the vport */
17077 	spin_lock_irq(&phba->hbalock);
17078 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
17079 		if (mb->vport != vport)
17080 			continue;
17081 
17082 		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
17083 			(mb->u.mb.mbxCommand != MBX_REG_VPI))
17084 			continue;
17085 
17086 		list_del(&mb->list);
17087 		list_add_tail(&mb->list, &mbox_cmd_list);
17088 	}
17089 	/* Clean up active mailbox command with the vport */
17090 	mb = phba->sli.mbox_active;
17091 	if (mb && (mb->vport == vport)) {
17092 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
17093 			(mb->u.mb.mbxCommand == MBX_REG_VPI))
17094 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17095 		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
17096 			act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
17097 			/* Take a node reference for delayed processing */
17098 			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
17099 			/* Unregister the RPI when the mailbox completes */
17100 			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
17101 		}
17102 	}
17103 	/* Cleanup any mailbox completions which are not yet processed */
17104 	do {
17105 		restart_loop = 0;
17106 		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
17107 			/*
17108 			 * If this mailbox has already been processed or is
17109 			 * for another vport, ignore it.
17110 			 */
17111 			if ((mb->vport != vport) ||
17112 				(mb->mbox_flag & LPFC_MBX_IMED_UNREG))
17113 				continue;
17114 
17115 			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
17116 				(mb->u.mb.mbxCommand != MBX_REG_VPI))
17117 				continue;
17118 
17119 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17120 			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
17121 				ndlp = (struct lpfc_nodelist *)mb->context2;
17122 				/* Unregister the RPI when mailbox complete */
17123 				/* Unregister the RPI when the mailbox completes */
17124 				restart_loop = 1;
17125 				spin_unlock_irq(&phba->hbalock);
17126 				spin_lock(shost->host_lock);
17127 				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
17128 				spin_unlock(shost->host_lock);
17129 				spin_lock_irq(&phba->hbalock);
17130 				break;
17131 			}
17132 		}
17133 	} while (restart_loop);
17134 
17135 	spin_unlock_irq(&phba->hbalock);
17136 
17137 	/* Release the cleaned-up mailbox commands */
17138 	while (!list_empty(&mbox_cmd_list)) {
17139 		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
17140 		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
17141 			mp = (struct lpfc_dmabuf *) (mb->context1);
17142 			if (mp) {
17143 				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
17144 				kfree(mp);
17145 			}
17146 			ndlp = (struct lpfc_nodelist *) mb->context2;
17147 			mb->context2 = NULL;
17148 			if (ndlp) {
17149 				spin_lock(shost->host_lock);
17150 				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
17151 				spin_unlock(shost->host_lock);
17152 				lpfc_nlp_put(ndlp);
17153 			}
17154 		}
17155 		mempool_free(mb, phba->mbox_mem_pool);
17156 	}
17157 
17158 	/* Release the ndlp with the cleaned-up active mailbox command */
17159 	if (act_mbx_ndlp) {
17160 		spin_lock(shost->host_lock);
17161 		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
17162 		spin_unlock(shost->host_lock);
17163 		lpfc_nlp_put(act_mbx_ndlp);
17164 	}
17165 }
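
/*
 * Illustrative sketch, not part of the driver: the do/while with
 * restart_loop above is needed because nlp_flag is updated under the
 * SCSI host lock, so hbalock is dropped mid-scan of mboxq_cmpl.  Once
 * the lock guarding a list is dropped, the iterator can no longer be
 * trusted, so the scan restarts from the head; the item just handled
 * must be recognisable on the next pass (here via LPFC_MBX_IMED_UNREG)
 * or the loop would never terminate.  Generic shape of the pattern,
 * with hypothetical names (guard_lock is held on entry, like hbalock
 * above):
 */
#if 0	/* example only */
	do {
		restart = 0;
		list_for_each_entry(item, &guarded_list, list) {
			if (item_already_handled(item))
				continue;
			mark_item_handled(item);     /* visible on re-scan */
			spin_unlock_irq(&guard_lock);
			do_work_needing_other_locks(item);
			spin_lock_irq(&guard_lock);
			restart = 1;                 /* iterator is stale */
			break;
		}
	} while (restart);
#endif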
17166 
17167 /**
17168  * lpfc_drain_txq - Drain the txq
17169  * @phba: Pointer to HBA context object.
17170  *
17171  * This function attempts to submit the IOCBs on the txq to the
17172  * adapter and returns the number of IOCBs still deferred on the
17173  * txq.  For SLI4 adapters, the txq contains ELS IOCBs that have
17174  * been deferred because no SGLs are available.  This congestion
17175  * can occur with large vport counts during node discovery.
17176  **/
17177 
17178 uint32_t
17179 lpfc_drain_txq(struct lpfc_hba *phba)
17180 {
17181 	LIST_HEAD(completions);
17182 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
17183 	struct lpfc_iocbq *piocbq = NULL;
17184 	unsigned long iflags = 0;
17185 	char *fail_msg = NULL;
17186 	struct lpfc_sglq *sglq;
17187 	union lpfc_wqe wqe;
17188 	uint32_t txq_cnt = 0;
17189 
17190 	spin_lock_irqsave(&pring->ring_lock, iflags);
17191 	list_for_each_entry(piocbq, &pring->txq, list) {
17192 		txq_cnt++;
17193 	}
17194 
17195 	if (txq_cnt > pring->txq_max)
17196 		pring->txq_max = txq_cnt;
17197 
17198 	spin_unlock_irqrestore(&pring->ring_lock, iflags);
17199 
17200 	while (!list_empty(&pring->txq)) {
17201 		spin_lock_irqsave(&pring->ring_lock, iflags);
17202 
17203 		piocbq = lpfc_sli_ringtx_get(phba, pring);
17204 		if (!piocbq) {
17205 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
17206 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17207 				"2823 txq empty and txq_cnt is %d\n ",
17208 				txq_cnt);
17209 			break;
17210 		}
17211 		sglq = __lpfc_sli_get_sglq(phba, piocbq);
17212 		if (!sglq) {
17213 			__lpfc_sli_ringtx_put(phba, pring, piocbq);
17214 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
17215 			break;
17216 		}
17217 		txq_cnt--;
17218 
17219 		/* The xri and iocb resources secured,
17220 		 * attempt to issue request
17221 		 */
17222 		piocbq->sli4_lxritag = sglq->sli4_lxritag;
17223 		piocbq->sli4_xritag = sglq->sli4_xritag;
17224 		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
17225 			fail_msg = "to convert bpl to sgl";
17226 		else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
17227 			fail_msg = "to convert iocb to wqe";
17228 		else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
17229 			fail_msg = " - Wq is full";
17230 		else
17231 			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
17232 
17233 		if (fail_msg) {
17234 			/* Failed means we can't issue and need to cancel */
17235 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17236 					"2822 IOCB failed %s iotag 0x%x "
17237 					"xri 0x%x\n", fail_msg,
17238 					piocbq->iotag, piocbq->sli4_xritag);
17239 			list_add_tail(&piocbq->list, &completions);
17240 			fail_msg = NULL; /* don't cancel later successes */
17241 		}
17242 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
17243 	}
17244 
17245 	/* Cancel all the IOCBs that cannot be issued */
17246 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
17247 				IOERR_SLI_ABORTED);
17248 
17249 	return txq_cnt;
17250 }
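
/*
 * Illustrative sketch, not part of the driver: lpfc_drain_txq() returns
 * the number of ELS IOCBs still deferred on the txq, so a hypothetical
 * resource-release path could re-run it whenever SGLs/XRIs are freed and
 * use the return value to decide whether another attempt is needed.
 */
#if 0	/* example only */
static bool lpfc_retry_deferred_els_sketch(struct lpfc_hba *phba)
{
	/* Zero means every deferred ELS IOCB was submitted (or cancelled). */
	return lpfc_drain_txq(phba) == 0;
}
#endif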
17251