xref: /openbmc/linux/drivers/scsi/bfa/bfa_svc.c (revision 2359ccdd)
1 /*
2  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
3  * Copyright (c) 2014- QLogic Corporation.
4  * All rights reserved
5  * www.qlogic.com
6  *
7  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
8  *
9  * This program is free software; you can redistribute it and/or modify it
10  * under the terms of the GNU General Public License (GPL) Version 2 as
11  * published by the Free Software Foundation
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  */
18 
19 #include "bfad_drv.h"
20 #include "bfad_im.h"
21 #include "bfa_plog.h"
22 #include "bfa_cs.h"
23 #include "bfa_modules.h"
24 
25 BFA_TRC_FILE(HAL, FCXP);
26 
27 /*
28  * LPS related definitions
29  */
30 #define BFA_LPS_MIN_LPORTS      (1)
31 #define BFA_LPS_MAX_LPORTS      (256)
32 
33 /*
34  * Maximum Vports supported per physical port or vf.
35  */
36 #define BFA_LPS_MAX_VPORTS_SUPP_CB  255
37 #define BFA_LPS_MAX_VPORTS_SUPP_CT  190
38 
39 
40 /*
41  * FC PORT related definitions
42  */
43 /*
44  * The port is considered disabled if corresponding physical port or IOC are
45  * disabled explicitly
46  */
47 #define BFA_PORT_IS_DISABLED(bfa) \
48 	((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
49 	(bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
50 
51 /*
52  * BFA port state machine events
53  */
enum bfa_fcport_sm_event {
	BFA_FCPORT_SM_START	= 1,	/*  start port state machine	*/
	BFA_FCPORT_SM_STOP	= 2,	/*  stop port state machine	*/
	BFA_FCPORT_SM_ENABLE	= 3,	/*  enable port		*/
	BFA_FCPORT_SM_DISABLE	= 4,	/*  disable port state machine */
	BFA_FCPORT_SM_FWRSP	= 5,	/*  firmware enable/disable rsp */
	BFA_FCPORT_SM_LINKUP	= 6,	/*  firmware linkup event	*/
	BFA_FCPORT_SM_LINKDOWN	= 7,	/*  firmware linkdown event	*/
	BFA_FCPORT_SM_QRESUME	= 8,	/*  CQ space available	*/
	BFA_FCPORT_SM_HWFAIL	= 9,	/*  IOC h/w failure		*/
	BFA_FCPORT_SM_DPORTENABLE = 10, /*  enable dport      */
	BFA_FCPORT_SM_DPORTDISABLE = 11,/*  disable dport     */
	BFA_FCPORT_SM_FAA_MISCONFIG = 12,	/* FAA misconfiguration */
	BFA_FCPORT_SM_DDPORTENABLE  = 13,	/* enable ddport	*/
	BFA_FCPORT_SM_DDPORTDISABLE = 14,	/* disable ddport	*/
};
70 
71 /*
72  * BFA port link notification state machine events
73  */
74 
enum bfa_fcport_ln_sm_event {
	BFA_FCPORT_LN_SM_LINKUP		= 1,	/*  linkup event	*/
	BFA_FCPORT_LN_SM_LINKDOWN	= 2,	/*  linkdown event	*/
	BFA_FCPORT_LN_SM_NOTIFICATION	= 3	/*  notification done	*/
};
80 
81 /*
82  * RPORT related definitions
83  */
/*
 * Deliver the rport-offline callback: call it directly when the FCS
 * context is attached (bfa->fcs set), otherwise defer it through the
 * BFA callback queue.
 */
#define bfa_rport_offline_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_offline((__rp)->rport_drv);      \
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_offline, (__rp));      \
	}								\
} while (0)
92 
/*
 * Deliver the rport-online callback: call it directly when the FCS
 * context is attached (bfa->fcs set), otherwise defer it through the
 * BFA callback queue.
 */
#define bfa_rport_online_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_online((__rp)->rport_drv);      \
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				  __bfa_cb_rport_online, (__rp));      \
		}							\
} while (0)
101 
102 /*
103  * forward declarations FCXP related functions
104  */
105 static void	__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
106 static void	hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
107 				struct bfi_fcxp_send_rsp_s *fcxp_rsp);
108 static void	hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
109 				struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
110 static void	bfa_fcxp_qresume(void *cbarg);
111 static void	bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
112 				struct bfi_fcxp_send_req_s *send_req);
113 
114 /*
115  * forward declarations for LPS functions
116  */
117 static void bfa_lps_login_rsp(struct bfa_s *bfa,
118 				struct bfi_lps_login_rsp_s *rsp);
119 static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
120 static void bfa_lps_logout_rsp(struct bfa_s *bfa,
121 				struct bfi_lps_logout_rsp_s *rsp);
122 static void bfa_lps_reqq_resume(void *lps_arg);
123 static void bfa_lps_free(struct bfa_lps_s *lps);
124 static void bfa_lps_send_login(struct bfa_lps_s *lps);
125 static void bfa_lps_send_logout(struct bfa_lps_s *lps);
126 static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps);
127 static void bfa_lps_login_comp(struct bfa_lps_s *lps);
128 static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
129 static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
130 
131 /*
132  * forward declaration for LPS state machine
133  */
134 static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
135 static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
136 static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
137 					event);
138 static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
139 static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps,
140 					enum bfa_lps_event event);
141 static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
142 static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
143 					event);
144 
145 /*
146  * forward declaration for FC Port functions
147  */
148 static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
149 static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
150 static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
151 static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
152 static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
153 static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
154 static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
155 			enum bfa_port_linkstate event, bfa_boolean_t trunk);
156 static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
157 				enum bfa_port_linkstate event);
158 static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
159 static void bfa_fcport_stats_get_timeout(void *cbarg);
160 static void bfa_fcport_stats_clr_timeout(void *cbarg);
161 static void bfa_trunk_iocdisable(struct bfa_s *bfa);
162 
163 /*
164  * forward declaration for FC PORT state machine
165  */
166 static void     bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
167 					enum bfa_fcport_sm_event event);
168 static void     bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
169 					enum bfa_fcport_sm_event event);
170 static void     bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
171 					enum bfa_fcport_sm_event event);
172 static void     bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
173 					enum bfa_fcport_sm_event event);
174 static void     bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
175 					enum bfa_fcport_sm_event event);
176 static void     bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
177 					enum bfa_fcport_sm_event event);
178 static void     bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
179 					enum bfa_fcport_sm_event event);
180 static void     bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
181 					enum bfa_fcport_sm_event event);
182 static void     bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
183 					enum bfa_fcport_sm_event event);
184 static void     bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
185 					enum bfa_fcport_sm_event event);
186 static void     bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
187 					enum bfa_fcport_sm_event event);
188 static void     bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
189 					enum bfa_fcport_sm_event event);
190 static void	bfa_fcport_sm_dport(struct bfa_fcport_s *fcport,
191 					enum bfa_fcport_sm_event event);
192 static void     bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
193 					enum bfa_fcport_sm_event event);
194 static void	bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
195 					enum bfa_fcport_sm_event event);
196 
197 static void     bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
198 					enum bfa_fcport_ln_sm_event event);
199 static void     bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
200 					enum bfa_fcport_ln_sm_event event);
201 static void     bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
202 					enum bfa_fcport_ln_sm_event event);
203 static void     bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
204 					enum bfa_fcport_ln_sm_event event);
205 static void     bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
206 					enum bfa_fcport_ln_sm_event event);
207 static void     bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
208 					enum bfa_fcport_ln_sm_event event);
209 static void     bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
210 					enum bfa_fcport_ln_sm_event event);
211 
/*
 * Table mapping each FC port state-machine handler to the externally
 * reported port state.  Note that both sm_iocdown and sm_iocfail map to
 * BFA_PORT_ST_IOCDOWN.
 */
static struct bfa_sm_table_s hal_port_sm_table[] = {
	{BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
	{BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
	{BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
	{BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
	{BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
	{BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
	{BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
	{BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
	{BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
	{BFA_SM(bfa_fcport_sm_dport), BFA_PORT_ST_DPORT},
	{BFA_SM(bfa_fcport_sm_ddport), BFA_PORT_ST_DDPORT},
	{BFA_SM(bfa_fcport_sm_faa_misconfig), BFA_PORT_ST_FAA_MISCONFIG},
};
229 
230 
231 /*
232  * forward declaration for RPORT related functions
233  */
234 static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
235 static void		bfa_rport_free(struct bfa_rport_s *rport);
236 static bfa_boolean_t	bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
237 static bfa_boolean_t	bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
238 static bfa_boolean_t	bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
239 static void		__bfa_cb_rport_online(void *cbarg,
240 						bfa_boolean_t complete);
241 static void		__bfa_cb_rport_offline(void *cbarg,
242 						bfa_boolean_t complete);
243 
244 /*
245  * forward declaration for RPORT state machine
246  */
247 static void     bfa_rport_sm_uninit(struct bfa_rport_s *rp,
248 					enum bfa_rport_event event);
249 static void     bfa_rport_sm_created(struct bfa_rport_s *rp,
250 					enum bfa_rport_event event);
251 static void     bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
252 					enum bfa_rport_event event);
253 static void     bfa_rport_sm_online(struct bfa_rport_s *rp,
254 					enum bfa_rport_event event);
255 static void     bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
256 					enum bfa_rport_event event);
257 static void     bfa_rport_sm_offline(struct bfa_rport_s *rp,
258 					enum bfa_rport_event event);
259 static void     bfa_rport_sm_deleting(struct bfa_rport_s *rp,
260 					enum bfa_rport_event event);
261 static void     bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
262 					enum bfa_rport_event event);
263 static void     bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
264 					enum bfa_rport_event event);
265 static void     bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
266 					enum bfa_rport_event event);
267 static void     bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
268 					enum bfa_rport_event event);
269 static void     bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
270 					enum bfa_rport_event event);
271 static void     bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
272 					enum bfa_rport_event event);
273 
274 /*
275  * PLOG related definitions
276  */
277 static int
278 plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
279 {
280 	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
281 		(pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
282 		return 1;
283 
284 	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
285 		(pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
286 		return 1;
287 
288 	return 0;
289 }
290 
291 static void
292 bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
293 {
294 	u16 tail;
295 	struct bfa_plog_rec_s *pl_recp;
296 
297 	if (plog->plog_enabled == 0)
298 		return;
299 
300 	if (plkd_validate_logrec(pl_rec)) {
301 		WARN_ON(1);
302 		return;
303 	}
304 
305 	tail = plog->tail;
306 
307 	pl_recp = &(plog->plog_recs[tail]);
308 
309 	memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
310 
311 	pl_recp->tv = ktime_get_real_seconds();
312 	BFA_PL_LOG_REC_INCR(plog->tail);
313 
314 	if (plog->head == plog->tail)
315 		BFA_PL_LOG_REC_INCR(plog->head);
316 }
317 
318 void
319 bfa_plog_init(struct bfa_plog_s *plog)
320 {
321 	memset((char *)plog, 0, sizeof(struct bfa_plog_s));
322 
323 	memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
324 	plog->head = plog->tail = 0;
325 	plog->plog_enabled = 1;
326 }
327 
328 void
329 bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
330 		enum bfa_plog_eid event,
331 		u16 misc, char *log_str)
332 {
333 	struct bfa_plog_rec_s  lp;
334 
335 	if (plog->plog_enabled) {
336 		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
337 		lp.mid = mid;
338 		lp.eid = event;
339 		lp.log_type = BFA_PL_LOG_TYPE_STRING;
340 		lp.misc = misc;
341 		strlcpy(lp.log_entry.string_log, log_str,
342 			BFA_PL_STRING_LOG_SZ);
343 		lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
344 		bfa_plog_add(plog, &lp);
345 	}
346 }
347 
348 void
349 bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
350 		enum bfa_plog_eid event,
351 		u16 misc, u32 *intarr, u32 num_ints)
352 {
353 	struct bfa_plog_rec_s  lp;
354 	u32 i;
355 
356 	if (num_ints > BFA_PL_INT_LOG_SZ)
357 		num_ints = BFA_PL_INT_LOG_SZ;
358 
359 	if (plog->plog_enabled) {
360 		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
361 		lp.mid = mid;
362 		lp.eid = event;
363 		lp.log_type = BFA_PL_LOG_TYPE_INT;
364 		lp.misc = misc;
365 
366 		for (i = 0; i < num_ints; i++)
367 			lp.log_entry.int_log[i] = intarr[i];
368 
369 		lp.log_num_ints = (u8) num_ints;
370 
371 		bfa_plog_add(plog, &lp);
372 	}
373 }
374 
375 void
376 bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
377 			enum bfa_plog_eid event,
378 			u16 misc, struct fchs_s *fchdr)
379 {
380 	struct bfa_plog_rec_s  lp;
381 	u32	*tmp_int = (u32 *) fchdr;
382 	u32	ints[BFA_PL_INT_LOG_SZ];
383 
384 	if (plog->plog_enabled) {
385 		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
386 
387 		ints[0] = tmp_int[0];
388 		ints[1] = tmp_int[1];
389 		ints[2] = tmp_int[4];
390 
391 		bfa_plog_intarr(plog, mid, event, misc, ints, 3);
392 	}
393 }
394 
395 void
396 bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
397 		      enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
398 		      u32 pld_w0)
399 {
400 	struct bfa_plog_rec_s  lp;
401 	u32	*tmp_int = (u32 *) fchdr;
402 	u32	ints[BFA_PL_INT_LOG_SZ];
403 
404 	if (plog->plog_enabled) {
405 		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
406 
407 		ints[0] = tmp_int[0];
408 		ints[1] = tmp_int[1];
409 		ints[2] = tmp_int[4];
410 		ints[3] = pld_w0;
411 
412 		bfa_plog_intarr(plog, mid, event, misc, ints, 4);
413 	}
414 }
415 
416 
417 /*
418  *  fcxp_pvt BFA FCXP private functions
419  */
420 
421 static void
422 claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
423 {
424 	u16	i;
425 	struct bfa_fcxp_s *fcxp;
426 
427 	fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
428 	memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
429 
430 	INIT_LIST_HEAD(&mod->fcxp_req_free_q);
431 	INIT_LIST_HEAD(&mod->fcxp_rsp_free_q);
432 	INIT_LIST_HEAD(&mod->fcxp_active_q);
433 	INIT_LIST_HEAD(&mod->fcxp_req_unused_q);
434 	INIT_LIST_HEAD(&mod->fcxp_rsp_unused_q);
435 
436 	mod->fcxp_list = fcxp;
437 
438 	for (i = 0; i < mod->num_fcxps; i++) {
439 		fcxp->fcxp_mod = mod;
440 		fcxp->fcxp_tag = i;
441 
442 		if (i < (mod->num_fcxps / 2)) {
443 			list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
444 			fcxp->req_rsp = BFA_TRUE;
445 		} else {
446 			list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
447 			fcxp->req_rsp = BFA_FALSE;
448 		}
449 
450 		bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
451 		fcxp->reqq_waiting = BFA_FALSE;
452 
453 		fcxp = fcxp + 1;
454 	}
455 
456 	bfa_mem_kva_curp(mod) = (void *)fcxp;
457 }
458 
459 void
460 bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
461 		struct bfa_s *bfa)
462 {
463 	struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
464 	struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
465 	struct bfa_mem_dma_s *seg_ptr;
466 	u16	nsegs, idx, per_seg_fcxp;
467 	u16	num_fcxps = cfg->fwcfg.num_fcxp_reqs;
468 	u32	per_fcxp_sz;
469 
470 	if (num_fcxps == 0)
471 		return;
472 
473 	if (cfg->drvcfg.min_cfg)
474 		per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
475 	else
476 		per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;
477 
478 	/* dma memory */
479 	nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
480 	per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);
481 
482 	bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
483 		if (num_fcxps >= per_seg_fcxp) {
484 			num_fcxps -= per_seg_fcxp;
485 			bfa_mem_dma_setup(minfo, seg_ptr,
486 				per_seg_fcxp * per_fcxp_sz);
487 		} else
488 			bfa_mem_dma_setup(minfo, seg_ptr,
489 				num_fcxps * per_fcxp_sz);
490 	}
491 
492 	/* kva memory */
493 	bfa_mem_kva_setup(minfo, fcxp_kva,
494 		cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
495 }
496 
497 void
498 bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
499 		struct bfa_pcidev_s *pcidev)
500 {
501 	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
502 
503 	mod->bfa = bfa;
504 	mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
505 
506 	/*
507 	 * Initialize FCXP request and response payload sizes.
508 	 */
509 	mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
510 	if (!cfg->drvcfg.min_cfg)
511 		mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
512 
513 	INIT_LIST_HEAD(&mod->req_wait_q);
514 	INIT_LIST_HEAD(&mod->rsp_wait_q);
515 
516 	claim_fcxps_mem(mod);
517 }
518 
519 void
520 bfa_fcxp_iocdisable(struct bfa_s *bfa)
521 {
522 	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
523 	struct bfa_fcxp_s *fcxp;
524 	struct list_head	      *qe, *qen;
525 
526 	/* Enqueue unused fcxp resources to free_q */
527 	list_splice_tail_init(&mod->fcxp_req_unused_q, &mod->fcxp_req_free_q);
528 	list_splice_tail_init(&mod->fcxp_rsp_unused_q, &mod->fcxp_rsp_free_q);
529 
530 	list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
531 		fcxp = (struct bfa_fcxp_s *) qe;
532 		if (fcxp->caller == NULL) {
533 			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
534 					BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
535 			bfa_fcxp_free(fcxp);
536 		} else {
537 			fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
538 			bfa_cb_queue(bfa, &fcxp->hcb_qe,
539 				     __bfa_fcxp_send_cbfn, fcxp);
540 		}
541 	}
542 }
543 
544 static struct bfa_fcxp_s *
545 bfa_fcxp_get(struct bfa_fcxp_mod_s *fm, bfa_boolean_t req)
546 {
547 	struct bfa_fcxp_s *fcxp;
548 
549 	if (req)
550 		bfa_q_deq(&fm->fcxp_req_free_q, &fcxp);
551 	else
552 		bfa_q_deq(&fm->fcxp_rsp_free_q, &fcxp);
553 
554 	if (fcxp)
555 		list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
556 
557 	return fcxp;
558 }
559 
560 static void
561 bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
562 	       struct bfa_s *bfa,
563 	       u8 *use_ibuf,
564 	       u32 *nr_sgles,
565 	       bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
566 	       bfa_fcxp_get_sglen_t *r_sglen_cbfn,
567 	       struct list_head *r_sgpg_q,
568 	       int n_sgles,
569 	       bfa_fcxp_get_sgaddr_t sga_cbfn,
570 	       bfa_fcxp_get_sglen_t sglen_cbfn)
571 {
572 
573 	WARN_ON(bfa == NULL);
574 
575 	bfa_trc(bfa, fcxp->fcxp_tag);
576 
577 	if (n_sgles == 0) {
578 		*use_ibuf = 1;
579 	} else {
580 		WARN_ON(*sga_cbfn == NULL);
581 		WARN_ON(*sglen_cbfn == NULL);
582 
583 		*use_ibuf = 0;
584 		*r_sga_cbfn = sga_cbfn;
585 		*r_sglen_cbfn = sglen_cbfn;
586 
587 		*nr_sgles = n_sgles;
588 
589 		/*
590 		 * alloc required sgpgs
591 		 */
592 		if (n_sgles > BFI_SGE_INLINE)
593 			WARN_ON(1);
594 	}
595 
596 }
597 
598 static void
599 bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
600 	       void *caller, struct bfa_s *bfa, int nreq_sgles,
601 	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
602 	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
603 	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
604 	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
605 {
606 
607 	WARN_ON(bfa == NULL);
608 
609 	bfa_trc(bfa, fcxp->fcxp_tag);
610 
611 	fcxp->caller = caller;
612 
613 	bfa_fcxp_init_reqrsp(fcxp, bfa,
614 		&fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
615 		&fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
616 		nreq_sgles, req_sga_cbfn, req_sglen_cbfn);
617 
618 	bfa_fcxp_init_reqrsp(fcxp, bfa,
619 		&fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
620 		&fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
621 		nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);
622 
623 }
624 
625 static void
626 bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
627 {
628 	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
629 	struct bfa_fcxp_wqe_s *wqe;
630 
631 	if (fcxp->req_rsp)
632 		bfa_q_deq(&mod->req_wait_q, &wqe);
633 	else
634 		bfa_q_deq(&mod->rsp_wait_q, &wqe);
635 
636 	if (wqe) {
637 		bfa_trc(mod->bfa, fcxp->fcxp_tag);
638 
639 		bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
640 			wqe->nrsp_sgles, wqe->req_sga_cbfn,
641 			wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
642 			wqe->rsp_sglen_cbfn);
643 
644 		wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
645 		return;
646 	}
647 
648 	WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
649 	list_del(&fcxp->qe);
650 
651 	if (fcxp->req_rsp)
652 		list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
653 	else
654 		list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
655 }
656 
/*
 * No-op send-completion callback, installed as fcxp->send_cbfn when the
 * caller passes a NULL cbfn to bfa_fcxp_send() (see the assignment in
 * bfa_fcxp_send).  All arguments are ignored.
 */
static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		   bfa_status_t req_status, u32 rsp_len,
		   u32 resid_len, struct fchs_s *rsp_fchs)
{
	/* discarded fcxp completion */
}
664 
665 static void
666 __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
667 {
668 	struct bfa_fcxp_s *fcxp = cbarg;
669 
670 	if (complete) {
671 		fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
672 				fcxp->rsp_status, fcxp->rsp_len,
673 				fcxp->residue_len, &fcxp->rsp_fchs);
674 	} else {
675 		bfa_fcxp_free(fcxp);
676 	}
677 }
678 
679 static void
680 hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
681 {
682 	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
683 	struct bfa_fcxp_s	*fcxp;
684 	u16		fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);
685 
686 	bfa_trc(bfa, fcxp_tag);
687 
688 	fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);
689 
690 	/*
691 	 * @todo f/w should not set residue to non-0 when everything
692 	 *	 is received.
693 	 */
694 	if (fcxp_rsp->req_status == BFA_STATUS_OK)
695 		fcxp_rsp->residue_len = 0;
696 	else
697 		fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);
698 
699 	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);
700 
701 	WARN_ON(fcxp->send_cbfn == NULL);
702 
703 	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);
704 
705 	if (fcxp->send_cbfn != NULL) {
706 		bfa_trc(mod->bfa, (NULL == fcxp->caller));
707 		if (fcxp->caller == NULL) {
708 			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
709 					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
710 					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
711 			/*
712 			 * fcxp automatically freed on return from the callback
713 			 */
714 			bfa_fcxp_free(fcxp);
715 		} else {
716 			fcxp->rsp_status = fcxp_rsp->req_status;
717 			fcxp->rsp_len = fcxp_rsp->rsp_len;
718 			fcxp->residue_len = fcxp_rsp->residue_len;
719 			fcxp->rsp_fchs = fcxp_rsp->fchs;
720 
721 			bfa_cb_queue(bfa, &fcxp->hcb_qe,
722 					__bfa_fcxp_send_cbfn, fcxp);
723 		}
724 	} else {
725 		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
726 	}
727 }
728 
729 static void
730 hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
731 		 struct fchs_s *fchs)
732 {
733 	/*
734 	 * TODO: TX ox_id
735 	 */
736 	if (reqlen > 0) {
737 		if (fcxp->use_ireqbuf) {
738 			u32	pld_w0 =
739 				*((u32 *) BFA_FCXP_REQ_PLD(fcxp));
740 
741 			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
742 					BFA_PL_EID_TX,
743 					reqlen + sizeof(struct fchs_s), fchs,
744 					pld_w0);
745 		} else {
746 			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
747 					BFA_PL_EID_TX,
748 					reqlen + sizeof(struct fchs_s),
749 					fchs);
750 		}
751 	} else {
752 		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
753 			       reqlen + sizeof(struct fchs_s), fchs);
754 	}
755 }
756 
757 static void
758 hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
759 		 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
760 {
761 	if (fcxp_rsp->rsp_len > 0) {
762 		if (fcxp->use_irspbuf) {
763 			u32	pld_w0 =
764 				*((u32 *) BFA_FCXP_RSP_PLD(fcxp));
765 
766 			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
767 					      BFA_PL_EID_RX,
768 					      (u16) fcxp_rsp->rsp_len,
769 					      &fcxp_rsp->fchs, pld_w0);
770 		} else {
771 			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
772 				       BFA_PL_EID_RX,
773 				       (u16) fcxp_rsp->rsp_len,
774 				       &fcxp_rsp->fchs);
775 		}
776 	} else {
777 		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
778 			       (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
779 	}
780 }
781 
/*
 * Handler to resume sending an fcxp when space is available in the CPE
 * request queue.
 */
785 static void
786 bfa_fcxp_qresume(void *cbarg)
787 {
788 	struct bfa_fcxp_s		*fcxp = cbarg;
789 	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
790 	struct bfi_fcxp_send_req_s	*send_req;
791 
792 	fcxp->reqq_waiting = BFA_FALSE;
793 	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
794 	bfa_fcxp_queue(fcxp, send_req);
795 }
796 
/*
 * Queue fcxp send request to firmware.
 */
800 static void
801 bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
802 {
803 	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
804 	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
805 	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
806 	struct bfa_rport_s		*rport = reqi->bfa_rport;
807 
808 	bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
809 		    bfa_fn_lpu(bfa));
810 
811 	send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
812 	if (rport) {
813 		send_req->rport_fw_hndl = rport->fw_handle;
814 		send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
815 		if (send_req->max_frmsz == 0)
816 			send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
817 	} else {
818 		send_req->rport_fw_hndl = 0;
819 		send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
820 	}
821 
822 	send_req->vf_id = cpu_to_be16(reqi->vf_id);
823 	send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
824 	send_req->class = reqi->class;
825 	send_req->rsp_timeout = rspi->rsp_timeout;
826 	send_req->cts = reqi->cts;
827 	send_req->fchs = reqi->fchs;
828 
829 	send_req->req_len = cpu_to_be32(reqi->req_tot_len);
830 	send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
831 
832 	/*
833 	 * setup req sgles
834 	 */
835 	if (fcxp->use_ireqbuf == 1) {
836 		bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
837 					BFA_FCXP_REQ_PLD_PA(fcxp));
838 	} else {
839 		if (fcxp->nreq_sgles > 0) {
840 			WARN_ON(fcxp->nreq_sgles != 1);
841 			bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
842 				fcxp->req_sga_cbfn(fcxp->caller, 0));
843 		} else {
844 			WARN_ON(reqi->req_tot_len != 0);
845 			bfa_alen_set(&send_req->rsp_alen, 0, 0);
846 		}
847 	}
848 
849 	/*
850 	 * setup rsp sgles
851 	 */
852 	if (fcxp->use_irspbuf == 1) {
853 		WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);
854 
855 		bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
856 					BFA_FCXP_RSP_PLD_PA(fcxp));
857 	} else {
858 		if (fcxp->nrsp_sgles > 0) {
859 			WARN_ON(fcxp->nrsp_sgles != 1);
860 			bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
861 				fcxp->rsp_sga_cbfn(fcxp->caller, 0));
862 
863 		} else {
864 			WARN_ON(rspi->rsp_maxlen != 0);
865 			bfa_alen_set(&send_req->rsp_alen, 0, 0);
866 		}
867 	}
868 
869 	hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
870 
871 	bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh);
872 
873 	bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
874 	bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
875 }
876 
877 /*
878  * Allocate an FCXP instance to send a response or to send a request
879  * that has a response. Request/response buffers are allocated by caller.
880  *
881  * @param[in]	bfa		BFA bfa instance
882  * @param[in]	nreq_sgles	Number of SG elements required for request
883  *				buffer. 0, if fcxp internal buffers are	used.
884  *				Use bfa_fcxp_get_reqbuf() to get the
885  *				internal req buffer.
886  * @param[in]	req_sgles	SG elements describing request buffer. Will be
887  *				copied in by BFA and hence can be freed on
888  *				return from this function.
889  * @param[in]	get_req_sga	function ptr to be called to get a request SG
890  *				Address (given the sge index).
891  * @param[in]	get_req_sglen	function ptr to be called to get a request SG
892  *				len (given the sge index).
893  * @param[in]	get_rsp_sga	function ptr to be called to get a response SG
894  *				Address (given the sge index).
895  * @param[in]	get_rsp_sglen	function ptr to be called to get a response SG
896  *				len (given the sge index).
897  * @param[in]	req		Allocated FCXP is used to send req or rsp?
898  *				request - BFA_TRUE, response - BFA_FALSE
899  *
900  * @return FCXP instance. NULL on failure.
901  */
902 struct bfa_fcxp_s *
903 bfa_fcxp_req_rsp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
904 		int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
905 		bfa_fcxp_get_sglen_t req_sglen_cbfn,
906 		bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
907 		bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
908 {
909 	struct bfa_fcxp_s *fcxp = NULL;
910 
911 	WARN_ON(bfa == NULL);
912 
913 	fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa), req);
914 	if (fcxp == NULL)
915 		return NULL;
916 
917 	bfa_trc(bfa, fcxp->fcxp_tag);
918 
919 	bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
920 			req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
921 
922 	return fcxp;
923 }
924 
925 /*
926  * Get the internal request buffer pointer
927  *
928  * @param[in]	fcxp	BFA fcxp pointer
929  *
930  * @return		pointer to the internal request buffer
931  */
932 void *
933 bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
934 {
935 	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
936 	void	*reqbuf;
937 
938 	WARN_ON(fcxp->use_ireqbuf != 1);
939 	reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
940 				mod->req_pld_sz + mod->rsp_pld_sz);
941 	return reqbuf;
942 }
943 
944 u32
945 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
946 {
947 	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
948 
949 	return mod->req_pld_sz;
950 }
951 
952 /*
953  * Get the internal response buffer pointer
954  *
955  * @param[in]	fcxp	BFA fcxp pointer
956  *
 * @return		pointer to the internal response buffer
958  */
959 void *
960 bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
961 {
962 	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
963 	void	*fcxp_buf;
964 
965 	WARN_ON(fcxp->use_irspbuf != 1);
966 
967 	fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
968 				mod->req_pld_sz + mod->rsp_pld_sz);
969 
970 	/* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
971 	return ((u8 *) fcxp_buf) + mod->req_pld_sz;
972 }
973 
974 /*
975  * Free the BFA FCXP
976  *
977  * @param[in]	fcxp			BFA fcxp pointer
978  *
979  * @return		void
980  */
981 void
982 bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
983 {
984 	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
985 
986 	WARN_ON(fcxp == NULL);
987 	bfa_trc(mod->bfa, fcxp->fcxp_tag);
988 	bfa_fcxp_put(fcxp);
989 }
990 
991 /*
992  * Send a FCXP request
993  *
994  * @param[in]	fcxp	BFA fcxp pointer
995  * @param[in]	rport	BFA rport pointer. Could be left NULL for WKA rports
996  * @param[in]	vf_id	virtual Fabric ID
997  * @param[in]	lp_tag	lport tag
998  * @param[in]	cts	use Continuous sequence
999  * @param[in]	cos	fc Class of Service
1000  * @param[in]	reqlen	request length, does not include FCHS length
1001  * @param[in]	fchs	fc Header Pointer. The header content will be copied
1002  *			in by BFA.
1003  *
1004  * @param[in]	cbfn	call back function to be called on receiving
1005  *								the response
1006  * @param[in]	cbarg	arg for cbfn
1007  * @param[in]	rsp_timeout
1008  *			response timeout
1009  *
 * @return		void
1011  */
void
bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
	      u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
	      u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
	      void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
{
	struct bfa_s			*bfa  = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
	struct bfi_fcxp_send_req_s	*send_req;

	bfa_trc(bfa, fcxp->fcxp_tag);

	/*
	 * setup request/response info
	 */
	reqi->bfa_rport = rport;
	reqi->vf_id = vf_id;
	reqi->lp_tag = lp_tag;
	reqi->class = cos;
	rspi->rsp_timeout = rsp_timeout;
	reqi->cts = cts;
	reqi->fchs = *fchs;	/* header copied by value; caller may reuse fchs */
	reqi->req_tot_len = reqlen;
	rspi->rsp_maxlen = rsp_maxlen;
	/* Always have a completion fn; substitute a no-op when cbfn is NULL. */
	fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
	fcxp->send_cbarg = cbarg;

	/*
	 * If no room in CPE queue, wait for space in request queue
	 */
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	if (!send_req) {
		bfa_trc(bfa, fcxp->fcxp_tag);
		fcxp->reqq_waiting = BFA_TRUE;
		/* send is resumed later when queue space frees up */
		bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
		return;
	}

	bfa_fcxp_queue(fcxp, send_req);
}
1053 
1054 /*
1055  * Abort a BFA FCXP
1056  *
1057  * @param[in]	fcxp	BFA fcxp pointer
1058  *
 * @return		bfa_status_t
1060  */
bfa_status_t
bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
{
	bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
	/* Abort is not implemented; any call here is a driver bug. */
	WARN_ON(1);
	return BFA_STATUS_OK;
}
1068 
1069 void
1070 bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
1071 	       bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
1072 	       void *caller, int nreq_sgles,
1073 	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
1074 	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
1075 	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
1076 	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
1077 {
1078 	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1079 
1080 	if (req)
1081 		WARN_ON(!list_empty(&mod->fcxp_req_free_q));
1082 	else
1083 		WARN_ON(!list_empty(&mod->fcxp_rsp_free_q));
1084 
1085 	wqe->alloc_cbfn = alloc_cbfn;
1086 	wqe->alloc_cbarg = alloc_cbarg;
1087 	wqe->caller = caller;
1088 	wqe->bfa = bfa;
1089 	wqe->nreq_sgles = nreq_sgles;
1090 	wqe->nrsp_sgles = nrsp_sgles;
1091 	wqe->req_sga_cbfn = req_sga_cbfn;
1092 	wqe->req_sglen_cbfn = req_sglen_cbfn;
1093 	wqe->rsp_sga_cbfn = rsp_sga_cbfn;
1094 	wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
1095 
1096 	if (req)
1097 		list_add_tail(&wqe->qe, &mod->req_wait_q);
1098 	else
1099 		list_add_tail(&wqe->qe, &mod->rsp_wait_q);
1100 }
1101 
1102 void
1103 bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
1104 {
1105 	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1106 
1107 	WARN_ON(!bfa_q_is_on_q(&mod->req_wait_q, wqe) ||
1108 		!bfa_q_is_on_q(&mod->rsp_wait_q, wqe));
1109 	list_del(&wqe->qe);
1110 }
1111 
1112 void
1113 bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
1114 {
1115 	/*
1116 	 * If waiting for room in request queue, cancel reqq wait
1117 	 * and free fcxp.
1118 	 */
1119 	if (fcxp->reqq_waiting) {
1120 		fcxp->reqq_waiting = BFA_FALSE;
1121 		bfa_reqq_wcancel(&fcxp->reqq_wqe);
1122 		bfa_fcxp_free(fcxp);
1123 		return;
1124 	}
1125 
1126 	fcxp->send_cbfn = bfa_fcxp_null_comp;
1127 }
1128 
1129 void
1130 bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1131 {
1132 	switch (msg->mhdr.msg_id) {
1133 	case BFI_FCXP_I2H_SEND_RSP:
1134 		hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
1135 		break;
1136 
1137 	default:
1138 		bfa_trc(bfa, msg->mhdr.msg_id);
1139 		WARN_ON(1);
1140 	}
1141 }
1142 
1143 u32
1144 bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1145 {
1146 	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1147 
1148 	return mod->rsp_pld_sz;
1149 }
1150 
1151 void
1152 bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
1153 {
1154 	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
1155 	struct list_head	*qe;
1156 	int	i;
1157 
1158 	for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
1159 		if (i < ((mod->num_fcxps - num_fcxp_fw) / 2)) {
1160 			bfa_q_deq_tail(&mod->fcxp_req_free_q, &qe);
1161 			list_add_tail(qe, &mod->fcxp_req_unused_q);
1162 		} else {
1163 			bfa_q_deq_tail(&mod->fcxp_rsp_free_q, &qe);
1164 			list_add_tail(qe, &mod->fcxp_rsp_unused_q);
1165 		}
1166 	}
1167 }
1168 
1169 /*
1170  *  BFA LPS state machine functions
1171  */
1172 
1173 /*
1174  * Init state -- no login
1175  */
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGIN:
		/* Send the login now if the request queue has room;
		 * otherwise park in loginwait until space frees up. */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_login);
			bfa_lps_send_login(lps);
		}

		if (lps->fdisc)
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FDISC Request");
		else
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FLOGI Request");
		break;

	case BFA_LPS_SM_LOGOUT:
		/* Nothing is logged in; complete the logout immediately. */
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		/* Return the tag to the free pool. */
		bfa_lps_free(lps);
		break;

	case BFA_LPS_SM_RX_CVL:
	case BFA_LPS_SM_OFFLINE:
		/* Already logged out -- nothing to do. */
		break;

	case BFA_LPS_SM_FWRSP:
		/*
		 * Could happen when fabric detects loopback and discards
		 * the lps request. FW will eventually send out the timeout.
		 * Just ignore.
		 */
		break;
	case BFA_LPS_SM_SET_N2N_PID:
		/*
		 * When topology is set to loop, bfa_lps_set_n2n_pid() sends
		 * this event. Ignore this event.
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1230 
1231 /*
1232  * login is in progress -- awaiting response from firmware
1233  */
static void
bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		if (lps->status == BFA_STATUS_OK) {
			bfa_sm_set_state(lps, bfa_lps_sm_online);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FDISC Accept");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
			/* If N2N, send the assigned PID to FW */
			bfa_trc(lps->bfa, lps->fport);
			bfa_trc(lps->bfa, lps->lp_pid);

			if (!lps->fport && lps->lp_pid)
				bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
		} else {
			/* Login rejected or timed out; back to init. */
			bfa_sm_set_state(lps, bfa_lps_sm_init);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FDISC Fail (RJT or timeout)");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FLOGI Fail (RJT or timeout)");
		}
		/* Notify the caller on both success and failure. */
		bfa_lps_login_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* Link dropped or owner deleted us mid-login. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		/* PID set while login is in flight; it is forwarded to FW
		 * only after a successful login response (see FWRSP above). */
		bfa_trc(lps->bfa, lps->fport);
		bfa_trc(lps->bfa, lps->lp_pid);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1284 
1285 /*
1286  * login pending - awaiting space in request queue
1287  */
static void
bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* Queue space opened up; send the deferred login. */
		bfa_sm_set_state(lps, bfa_lps_sm_login);
		bfa_lps_send_login(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* Abandon the queued wait before returning to init. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	case BFA_LPS_SM_RX_CVL:
		/*
		 * Login was not even sent out; so when getting out
		 * of this state, it will appear like a login retry
		 * after Clear virtual link
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1318 
1319 /*
1320  * login complete
1321  */
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGOUT:
		/* Send the logout now, or wait for request-queue space. */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_logout);
			bfa_lps_send_logout(lps);
		}
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		/* Fabric cleared the virtual link; session is gone. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		/* Push the assigned N2N PID to FW, waiting for queue
		 * space if necessary. */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else
			bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1367 
/*
 * online state -- waiting for request queue space to send the N2N PID
 */
static void
bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* Queue space available; send the deferred N2N PID. */
		bfa_sm_set_state(lps, bfa_lps_sm_online);
		bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_LOGOUT:
		/* The wqe already waits for queue space; the logout is
		 * sent from logowait when the wait resumes. */
		bfa_sm_set_state(lps, bfa_lps_sm_logowait);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		/* Session cleared by fabric; cancel the pending wait. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1409 
1410 /*
1411  * logout in progress - awaiting firmware response
1412  */
static void
bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
	case BFA_LPS_SM_OFFLINE:
		/* Logout finished (or link died); notify the caller. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		/* Deleted mid-logout; no completion is delivered. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1434 
1435 /*
1436  * logout pending -- awaiting space in request queue
1437  */
static void
bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* Queue space available; send the deferred logout. */
		bfa_sm_set_state(lps, bfa_lps_sm_logout);
		bfa_lps_send_logout(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* Abandon the queued wait before returning to init. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1460 
1461 
1462 
1463 /*
1464  *  lps_pvt BFA LPS private functions
1465  */
1466 
1467 /*
1468  * return memory requirement
1469  */
1470 void
1471 bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
1472 		struct bfa_s *bfa)
1473 {
1474 	struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa);
1475 
1476 	if (cfg->drvcfg.min_cfg)
1477 		bfa_mem_kva_setup(minfo, lps_kva,
1478 			sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS);
1479 	else
1480 		bfa_mem_kva_setup(minfo, lps_kva,
1481 			sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS);
1482 }
1483 
1484 /*
1485  * bfa module attach at initialization time
1486  */
1487 void
1488 bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1489 	struct bfa_pcidev_s *pcidev)
1490 {
1491 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1492 	struct bfa_lps_s	*lps;
1493 	int			i;
1494 
1495 	mod->num_lps = BFA_LPS_MAX_LPORTS;
1496 	if (cfg->drvcfg.min_cfg)
1497 		mod->num_lps = BFA_LPS_MIN_LPORTS;
1498 	else
1499 		mod->num_lps = BFA_LPS_MAX_LPORTS;
1500 	mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod);
1501 
1502 	bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s);
1503 
1504 	INIT_LIST_HEAD(&mod->lps_free_q);
1505 	INIT_LIST_HEAD(&mod->lps_active_q);
1506 	INIT_LIST_HEAD(&mod->lps_login_q);
1507 
1508 	for (i = 0; i < mod->num_lps; i++, lps++) {
1509 		lps->bfa	= bfa;
1510 		lps->bfa_tag	= (u8) i;
1511 		lps->reqq	= BFA_REQQ_LPS;
1512 		bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1513 		list_add_tail(&lps->qe, &mod->lps_free_q);
1514 	}
1515 }
1516 
1517 /*
1518  * IOC in disabled state -- consider all lps offline
1519  */
void
bfa_lps_iocdisable(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;
	struct list_head		*qe, *qen;

	/* Send OFFLINE to every lps that is active or mid-login. */
	list_for_each_safe(qe, qen, &mod->lps_active_q) {
		lps = (struct bfa_lps_s *) qe;
		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
	}
	list_for_each_safe(qe, qen, &mod->lps_login_q) {
		lps = (struct bfa_lps_s *) qe;
		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
	}
	/* No login can complete now; fold login_q back into active_q. */
	list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q);
}
1537 
1538 /*
1539  * Firmware login response
1540  */
static void
bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	WARN_ON(rsp->bfa_tag >= mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);

	lps->status = rsp->status;
	switch (rsp->status) {
	case BFA_STATUS_OK:
		/* Login accepted: cache the firmware-reported session info. */
		lps->fw_tag	= rsp->fw_tag;
		lps->fport	= rsp->f_port;
		if (lps->fport)
			lps->lp_pid = rsp->lp_pid;
		lps->npiv_en	= rsp->npiv_en;
		lps->pr_bbcred	= be16_to_cpu(rsp->bb_credit);
		lps->pr_pwwn	= rsp->port_name;
		lps->pr_nwwn	= rsp->node_name;
		lps->auth_req	= rsp->auth_req;
		lps->lp_mac	= rsp->lp_mac;
		lps->brcd_switch = rsp->brcd_switch;
		lps->fcf_mac	= rsp->fcf_mac;

		break;

	case BFA_STATUS_FABRIC_RJT:
		/* Keep the LS_RJT reason/explanation for the caller. */
		lps->lsrjt_rsn = rsp->lsrjt_rsn;
		lps->lsrjt_expl = rsp->lsrjt_expl;

		break;

	case BFA_STATUS_EPROTOCOL:
		lps->ext_status = rsp->ext_status;

		break;

	case BFA_STATUS_VPORT_MAX:
		/* ext_status is used as a count of additional queued
		 * logins to fail (see bfa_lps_no_res()). */
		if (rsp->ext_status)
			bfa_lps_no_res(lps, rsp->ext_status);
		break;

	default:
		/* Nothing to do with other status */
		break;
	}

	/* Move from login_q back to active_q and run the state machine. */
	list_del(&lps->qe);
	list_add_tail(&lps->qe, &mod->lps_active_q);
	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
1593 
1594 static void
1595 bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count)
1596 {
1597 	struct bfa_s		*bfa = first_lps->bfa;
1598 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1599 	struct list_head	*qe, *qe_next;
1600 	struct bfa_lps_s	*lps;
1601 
1602 	bfa_trc(bfa, count);
1603 
1604 	qe = bfa_q_next(first_lps);
1605 
1606 	while (count && qe) {
1607 		qe_next = bfa_q_next(qe);
1608 		lps = (struct bfa_lps_s *)qe;
1609 		bfa_trc(bfa, lps->bfa_tag);
1610 		lps->status = first_lps->status;
1611 		list_del(&lps->qe);
1612 		list_add_tail(&lps->qe, &mod->lps_active_q);
1613 		bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1614 		qe = qe_next;
1615 		count--;
1616 	}
1617 }
1618 
1619 /*
1620  * Firmware logout response
1621  */
1622 static void
1623 bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1624 {
1625 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1626 	struct bfa_lps_s	*lps;
1627 
1628 	WARN_ON(rsp->bfa_tag >= mod->num_lps);
1629 	lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
1630 
1631 	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1632 }
1633 
1634 /*
1635  * Firmware received a Clear virtual link request (for FCoE)
1636  */
1637 static void
1638 bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1639 {
1640 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1641 	struct bfa_lps_s	*lps;
1642 
1643 	lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag);
1644 
1645 	bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1646 }
1647 
1648 /*
1649  * Space is available in request queue, resume queueing request to firmware.
1650  */
1651 static void
1652 bfa_lps_reqq_resume(void *lps_arg)
1653 {
1654 	struct bfa_lps_s	*lps = lps_arg;
1655 
1656 	bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1657 }
1658 
1659 /*
1660  * lps is freed -- triggered by vport delete
1661  */
1662 static void
1663 bfa_lps_free(struct bfa_lps_s *lps)
1664 {
1665 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(lps->bfa);
1666 
1667 	lps->lp_pid = 0;
1668 	list_del(&lps->qe);
1669 	list_add_tail(&lps->qe, &mod->lps_free_q);
1670 }
1671 
1672 /*
1673  * send login request to firmware
1674  */
1675 static void
1676 bfa_lps_send_login(struct bfa_lps_s *lps)
1677 {
1678 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(lps->bfa);
1679 	struct bfi_lps_login_req_s	*m;
1680 
1681 	m = bfa_reqq_next(lps->bfa, lps->reqq);
1682 	WARN_ON(!m);
1683 
1684 	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
1685 		bfa_fn_lpu(lps->bfa));
1686 
1687 	m->bfa_tag	= lps->bfa_tag;
1688 	m->alpa		= lps->alpa;
1689 	m->pdu_size	= cpu_to_be16(lps->pdusz);
1690 	m->pwwn		= lps->pwwn;
1691 	m->nwwn		= lps->nwwn;
1692 	m->fdisc	= lps->fdisc;
1693 	m->auth_en	= lps->auth_en;
1694 
1695 	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1696 	list_del(&lps->qe);
1697 	list_add_tail(&lps->qe, &mod->lps_login_q);
1698 }
1699 
1700 /*
1701  * send logout request to firmware
1702  */
1703 static void
1704 bfa_lps_send_logout(struct bfa_lps_s *lps)
1705 {
1706 	struct bfi_lps_logout_req_s *m;
1707 
1708 	m = bfa_reqq_next(lps->bfa, lps->reqq);
1709 	WARN_ON(!m);
1710 
1711 	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
1712 		bfa_fn_lpu(lps->bfa));
1713 
1714 	m->fw_tag = lps->fw_tag;
1715 	m->port_name = lps->pwwn;
1716 	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1717 }
1718 
1719 /*
1720  * send n2n pid set request to firmware
1721  */
1722 static void
1723 bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
1724 {
1725 	struct bfi_lps_n2n_pid_req_s *m;
1726 
1727 	m = bfa_reqq_next(lps->bfa, lps->reqq);
1728 	WARN_ON(!m);
1729 
1730 	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
1731 		bfa_fn_lpu(lps->bfa));
1732 
1733 	m->fw_tag = lps->fw_tag;
1734 	m->lp_pid = lps->lp_pid;
1735 	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1736 }
1737 
1738 /*
1739  * Indirect login completion handler for non-fcs
1740  */
1741 static void
1742 bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1743 {
1744 	struct bfa_lps_s *lps	= arg;
1745 
1746 	if (!complete)
1747 		return;
1748 
1749 	if (lps->fdisc)
1750 		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1751 	else
1752 		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1753 }
1754 
1755 /*
1756  * Login completion handler -- direct call for fcs, queue for others
1757  */
1758 static void
1759 bfa_lps_login_comp(struct bfa_lps_s *lps)
1760 {
1761 	if (!lps->bfa->fcs) {
1762 		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1763 			lps);
1764 		return;
1765 	}
1766 
1767 	if (lps->fdisc)
1768 		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1769 	else
1770 		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1771 }
1772 
1773 /*
1774  * Indirect logout completion handler for non-fcs
1775  */
1776 static void
1777 bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1778 {
1779 	struct bfa_lps_s *lps	= arg;
1780 
1781 	if (!complete)
1782 		return;
1783 
1784 	if (lps->fdisc)
1785 		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1786 	else
1787 		bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
1788 }
1789 
1790 /*
1791  * Logout completion handler -- direct call for fcs, queue for others
1792  */
static void
bfa_lps_logout_comp(struct bfa_lps_s *lps)
{
	/* Non-fcs callers: defer completion through the callback queue. */
	if (!lps->bfa->fcs) {
		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
			lps);
		return;
	}
	/*
	 * fcs path: only FDISC logouts are completed here.
	 * NOTE(review): unlike bfa_lps_logout_comp_cb(), there is no
	 * bfa_cb_lps_flogo_comp() call for the non-fdisc case -- confirm
	 * this asymmetry is intentional.
	 */
	if (lps->fdisc)
		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
}
1804 
1805 /*
1806  * Clear virtual link completion handler for non-fcs
1807  */
1808 static void
1809 bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1810 {
1811 	struct bfa_lps_s *lps	= arg;
1812 
1813 	if (!complete)
1814 		return;
1815 
1816 	/* Clear virtual link to base port will result in link down */
1817 	if (lps->fdisc)
1818 		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1819 }
1820 
1821 /*
1822  * Received Clear virtual link event --direct call for fcs,
1823  * queue for others
1824  */
1825 static void
1826 bfa_lps_cvl_event(struct bfa_lps_s *lps)
1827 {
1828 	if (!lps->bfa->fcs) {
1829 		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1830 			lps);
1831 		return;
1832 	}
1833 
1834 	/* Clear virtual link to base port will result in link down */
1835 	if (lps->fdisc)
1836 		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1837 }
1838 
1839 
1840 
1841 /*
1842  *  lps_public BFA LPS public functions
1843  */
1844 
1845 u32
1846 bfa_lps_get_max_vport(struct bfa_s *bfa)
1847 {
1848 	if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1849 		return BFA_LPS_MAX_VPORTS_SUPP_CT;
1850 	else
1851 		return BFA_LPS_MAX_VPORTS_SUPP_CB;
1852 }
1853 
1854 /*
 * Allocate a lport service tag.
1856  */
1857 struct bfa_lps_s  *
1858 bfa_lps_alloc(struct bfa_s *bfa)
1859 {
1860 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1861 	struct bfa_lps_s	*lps = NULL;
1862 
1863 	bfa_q_deq(&mod->lps_free_q, &lps);
1864 
1865 	if (lps == NULL)
1866 		return NULL;
1867 
1868 	list_add_tail(&lps->qe, &mod->lps_active_q);
1869 
1870 	bfa_sm_set_state(lps, bfa_lps_sm_init);
1871 	return lps;
1872 }
1873 
1874 /*
1875  * Free lport service tag. This can be called anytime after an alloc.
1876  * No need to wait for any pending login/logout completions.
1877  */
void
bfa_lps_delete(struct bfa_lps_s *lps)
{
	/* The state machine handles DELETE in every state. */
	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
}
1883 
1884 /*
1885  * Initiate a lport login.
1886  */
1887 void
1888 bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1889 	wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
1890 {
1891 	lps->uarg	= uarg;
1892 	lps->alpa	= alpa;
1893 	lps->pdusz	= pdusz;
1894 	lps->pwwn	= pwwn;
1895 	lps->nwwn	= nwwn;
1896 	lps->fdisc	= BFA_FALSE;
1897 	lps->auth_en	= auth_en;
1898 	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1899 }
1900 
1901 /*
1902  * Initiate a lport fdisc login.
1903  */
1904 void
1905 bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1906 	wwn_t nwwn)
1907 {
1908 	lps->uarg	= uarg;
1909 	lps->alpa	= 0;
1910 	lps->pdusz	= pdusz;
1911 	lps->pwwn	= pwwn;
1912 	lps->nwwn	= nwwn;
1913 	lps->fdisc	= BFA_TRUE;
1914 	lps->auth_en	= BFA_FALSE;
1915 	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1916 }
1917 
1918 
1919 /*
 * Initiate a lport FDISC logout.
1921  */
void
bfa_lps_fdisclogo(struct bfa_lps_s *lps)
{
	/* Drive the logout through the lps state machine. */
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}
1927 
1928 u8
1929 bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
1930 {
1931 	struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1932 
1933 	return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag;
1934 }
1935 
1936 /*
1937  * Return lport services tag given the pid
1938  */
1939 u8
1940 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1941 {
1942 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1943 	struct bfa_lps_s	*lps;
1944 	int			i;
1945 
1946 	for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1947 		if (lps->lp_pid == pid)
1948 			return lps->bfa_tag;
1949 	}
1950 
1951 	/* Return base port tag anyway */
1952 	return 0;
1953 }
1954 
1955 
1956 /*
1957  * return port id assigned to the base lport
1958  */
1959 u32
1960 bfa_lps_get_base_pid(struct bfa_s *bfa)
1961 {
1962 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1963 
1964 	return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
1965 }
1966 
1967 /*
1968  * Set PID in case of n2n (which is assigned during PLOGI)
1969  */
void
bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, n2n_pid);

	/* Remember the PID; the state machine forwards it to FW when
	 * the login is (or becomes) online. */
	lps->lp_pid = n2n_pid;
	bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
}
1979 
1980 /*
1981  * LPS firmware message class handler.
1982  */
1983 void
1984 bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
1985 {
1986 	union bfi_lps_i2h_msg_u	msg;
1987 
1988 	bfa_trc(bfa, m->mhdr.msg_id);
1989 	msg.msg = m;
1990 
1991 	switch (m->mhdr.msg_id) {
1992 	case BFI_LPS_I2H_LOGIN_RSP:
1993 		bfa_lps_login_rsp(bfa, msg.login_rsp);
1994 		break;
1995 
1996 	case BFI_LPS_I2H_LOGOUT_RSP:
1997 		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
1998 		break;
1999 
2000 	case BFI_LPS_I2H_CVL_EVENT:
2001 		bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
2002 		break;
2003 
2004 	default:
2005 		bfa_trc(bfa, m->mhdr.msg_id);
2006 		WARN_ON(1);
2007 	}
2008 }
2009 
static void
bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
{
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	struct bfa_aen_entry_s  *aen_entry;

	/* bfad_get_aen_entry() presumably assigns aen_entry (out-param
	 * macro); NULL means no free AEN slot -- drop the event. */
	bfad_get_aen_entry(bfad, aen_entry);
	if (!aen_entry)
		return;

	aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
	aen_entry->aen_data.port.pwwn = fcport->pwwn;

	/* Send the AEN notification */
	bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq,
				  BFA_AEN_CAT_PORT, event);
}
2027 
2028 /*
2029  * FC PORT state machine functions
2030  */
static void
bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Start event after IOC is configured and BFA is started.
		 */
		fcport->use_flash_cfg = BFA_TRUE;

		/* Enable now if the request could be queued; otherwise
		 * wait for queue space in the qwait state. */
		if (bfa_fcport_send_enable(fcport)) {
			bfa_trc(fcport->bfa, BFA_TRUE);
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		} else {
			bfa_trc(fcport->bfa, BFA_FALSE);
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		}
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Port is persistently configured to be in enabled state. Do
		 * not change state. Port enabling is done when START event is
		 * received.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * If a port is persistently configured to be disabled, the
		 * first event will be a port disable request.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2078 
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
				enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space available; send the deferred enable. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		bfa_fcport_send_enable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		/* Abandon the queued wait before stopping. */
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enable is in progress.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Just send disable request to firmware when room becomes
		 * available in request queue.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		/* IOC died; cancel the wait and mark the port down. */
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2142 
static void
bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
	case BFA_FCPORT_SM_LINKDOWN:
		/* Enable acknowledged (or link still down); wait for linkup. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		break;

	case BFA_FCPORT_SM_LINKUP:
		/* Capture firmware link attributes before going online. */
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

		WARN_ON(!fcport->event_cbfn);
		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already being enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Send the disable now, or wait for request-queue space. */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2204 
/*
 * Port is enabled but the link is down; waiting for a link up event
 * from firmware.
 */
static void
bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
		WARN_ON(!fcport->event_cbfn);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
			/* Non-FC (FCoE) mode: log the FIP FCF discovery
			 * result reported in the link-state event. */
			bfa_trc(fcport->bfa,
				pevent->link_state.attr.vc_fcf.fcf.fipenabled);
			bfa_trc(fcport->bfa,
				pevent->link_state.attr.vc_fcf.fcf.fipfailed);

			if (pevent->link_state.attr.vc_fcf.fcf.fipfailed)
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovery Failed");
			else
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovered");
		}

		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port online: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);

		/* If QoS is enabled and it is not online, send AEN */
		if (fcport->cfg.qos_enabled &&
		    fcport->qos_attr.state != BFA_QOS_ONLINE)
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link down event.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2296 
/*
 * Port is enabled and the link is up: normal operating state.
 */
static void
bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
	enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Queue the disable (or wait for queue space), then tear
		 * down link state and notify upper layers. */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port offline: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
		wwn2str(pwwn_buf, fcport->pwwn);
		/* A linkdown on an administratively disabled port is
		 * expected; otherwise report lost connectivity. */
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_fcport_reset_linkinfo(fcport);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2395 
/*
 * A disable request is pending, waiting for space in the firmware
 * request queue.
 */
static void
bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
				 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space is available: send the disable now */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable arrived while disable is still queued: the port
		 * must be toggled (disable then enable) on resume */
		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2446 
/*
 * Port is being toggled: an enable arrived while a disable was still
 * waiting for request-queue space. On queue resume the disable is sent
 * first, immediately followed by the enable.
 */
static void
bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
				 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Send the pending disable, then queue the enable (or
		 * wait again if the queue fills up) */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable is already pending */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Drop the pending enable; fall back to a plain disable */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2493 
/*
 * Disable request has been sent to firmware; waiting for the response.
 */
static void
bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Re-enable requested while disable is in flight */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2548 
/*
 * Port is disabled. An enable request (or a diagnostic-port enable)
 * moves it out of this state.
 */
static void
bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore start event for a port that is disabled.
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Queue the enable, or wait for request-queue space */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already disabled.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	case BFA_FCPORT_SM_DPORTENABLE:
		bfa_sm_set_state(fcport, bfa_fcport_sm_dport);
		break;

	case BFA_FCPORT_SM_DDPORTENABLE:
		bfa_sm_set_state(fcport, bfa_fcport_sm_ddport);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2605 
2606 static void
2607 bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
2608 			 enum bfa_fcport_sm_event event)
2609 {
2610 	bfa_trc(fcport->bfa, event);
2611 
2612 	switch (event) {
2613 	case BFA_FCPORT_SM_START:
2614 		if (bfa_fcport_send_enable(fcport))
2615 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2616 		else
2617 			bfa_sm_set_state(fcport,
2618 					 bfa_fcport_sm_enabling_qwait);
2619 		break;
2620 
2621 	default:
2622 		/*
2623 		 * Ignore all other events.
2624 		 */
2625 		;
2626 	}
2627 }
2628 
2629 /*
2630  * Port is enabled. IOC is down/failed.
2631  */
2632 static void
2633 bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
2634 			 enum bfa_fcport_sm_event event)
2635 {
2636 	bfa_trc(fcport->bfa, event);
2637 
2638 	switch (event) {
2639 	case BFA_FCPORT_SM_START:
2640 		if (bfa_fcport_send_enable(fcport))
2641 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2642 		else
2643 			bfa_sm_set_state(fcport,
2644 					 bfa_fcport_sm_enabling_qwait);
2645 		break;
2646 
2647 	default:
2648 		/*
2649 		 * Ignore all events.
2650 		 */
2651 		;
2652 	}
2653 }
2654 
2655 /*
2656  * Port is disabled. IOC is down/failed.
2657  */
2658 static void
2659 bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
2660 			 enum bfa_fcport_sm_event event)
2661 {
2662 	bfa_trc(fcport->bfa, event);
2663 
2664 	switch (event) {
2665 	case BFA_FCPORT_SM_START:
2666 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2667 		break;
2668 
2669 	case BFA_FCPORT_SM_ENABLE:
2670 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2671 		break;
2672 
2673 	default:
2674 		/*
2675 		 * Ignore all events.
2676 		 */
2677 		;
2678 	}
2679 }
2680 
/*
 * Port is in diagnostic (D-Port) mode: normal enable/disable/start
 * events are ignored until D-Port mode is turned off.
 */
static void
bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_DPORTENABLE:
	case BFA_FCPORT_SM_DISABLE:
	case BFA_FCPORT_SM_ENABLE:
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore event for a port that is dport
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	case BFA_FCPORT_SM_DPORTDISABLE:
		/* Leaving D-Port mode returns the port to disabled */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2712 
/*
 * Port is in dynamic D-Port (ddport) mode: normal port events are
 * ignored; a disable (or ddport disable) returns it to disabled.
 */
static void
bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_DISABLE:
	case BFA_FCPORT_SM_DDPORTDISABLE:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DPORTENABLE:
	case BFA_FCPORT_SM_DPORTDISABLE:
	case BFA_FCPORT_SM_ENABLE:
	case BFA_FCPORT_SM_START:
		/**
		 * Ignore event for a port that is ddport
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2746 
/*
 * An FAA misconfiguration was detected on the port. Only disable,
 * stop, and hardware-failure events are acted upon.
 */
static void
bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
			    enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_DPORTENABLE:
	case BFA_FCPORT_SM_ENABLE:
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore event for a port as there is FAA misconfig
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Queue the disable (or wait for queue space), then
		 * clear link state and notify upper layers */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);

		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2789 
2790 /*
2791  * Link state is down
2792  */
2793 static void
2794 bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
2795 		enum bfa_fcport_ln_sm_event event)
2796 {
2797 	bfa_trc(ln->fcport->bfa, event);
2798 
2799 	switch (event) {
2800 	case BFA_FCPORT_LN_SM_LINKUP:
2801 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2802 		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2803 		break;
2804 
2805 	default:
2806 		bfa_sm_fault(ln->fcport->bfa, event);
2807 	}
2808 }
2809 
2810 /*
2811  * Link state is waiting for down notification
2812  */
2813 static void
2814 bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
2815 		enum bfa_fcport_ln_sm_event event)
2816 {
2817 	bfa_trc(ln->fcport->bfa, event);
2818 
2819 	switch (event) {
2820 	case BFA_FCPORT_LN_SM_LINKUP:
2821 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2822 		break;
2823 
2824 	case BFA_FCPORT_LN_SM_NOTIFICATION:
2825 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2826 		break;
2827 
2828 	default:
2829 		bfa_sm_fault(ln->fcport->bfa, event);
2830 	}
2831 }
2832 
2833 /*
2834  * Link state is waiting for down notification and there is a pending up
2835  */
2836 static void
2837 bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
2838 		enum bfa_fcport_ln_sm_event event)
2839 {
2840 	bfa_trc(ln->fcport->bfa, event);
2841 
2842 	switch (event) {
2843 	case BFA_FCPORT_LN_SM_LINKDOWN:
2844 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2845 		break;
2846 
2847 	case BFA_FCPORT_LN_SM_NOTIFICATION:
2848 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2849 		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2850 		break;
2851 
2852 	default:
2853 		bfa_sm_fault(ln->fcport->bfa, event);
2854 	}
2855 }
2856 
2857 /*
2858  * Link state is up
2859  */
2860 static void
2861 bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
2862 		enum bfa_fcport_ln_sm_event event)
2863 {
2864 	bfa_trc(ln->fcport->bfa, event);
2865 
2866 	switch (event) {
2867 	case BFA_FCPORT_LN_SM_LINKDOWN:
2868 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2869 		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2870 		break;
2871 
2872 	default:
2873 		bfa_sm_fault(ln->fcport->bfa, event);
2874 	}
2875 }
2876 
2877 /*
2878  * Link state is waiting for up notification
2879  */
2880 static void
2881 bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
2882 		enum bfa_fcport_ln_sm_event event)
2883 {
2884 	bfa_trc(ln->fcport->bfa, event);
2885 
2886 	switch (event) {
2887 	case BFA_FCPORT_LN_SM_LINKDOWN:
2888 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2889 		break;
2890 
2891 	case BFA_FCPORT_LN_SM_NOTIFICATION:
2892 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
2893 		break;
2894 
2895 	default:
2896 		bfa_sm_fault(ln->fcport->bfa, event);
2897 	}
2898 }
2899 
2900 /*
2901  * Link state is waiting for up notification and there is a pending down
2902  */
2903 static void
2904 bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
2905 		enum bfa_fcport_ln_sm_event event)
2906 {
2907 	bfa_trc(ln->fcport->bfa, event);
2908 
2909 	switch (event) {
2910 	case BFA_FCPORT_LN_SM_LINKUP:
2911 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
2912 		break;
2913 
2914 	case BFA_FCPORT_LN_SM_NOTIFICATION:
2915 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2916 		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2917 		break;
2918 
2919 	default:
2920 		bfa_sm_fault(ln->fcport->bfa, event);
2921 	}
2922 }
2923 
2924 /*
2925  * Link state is waiting for up notification and there are pending down and up
2926  */
2927 static void
2928 bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
2929 			enum bfa_fcport_ln_sm_event event)
2930 {
2931 	bfa_trc(ln->fcport->bfa, event);
2932 
2933 	switch (event) {
2934 	case BFA_FCPORT_LN_SM_LINKDOWN:
2935 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2936 		break;
2937 
2938 	case BFA_FCPORT_LN_SM_NOTIFICATION:
2939 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2940 		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2941 		break;
2942 
2943 	default:
2944 		bfa_sm_fault(ln->fcport->bfa, event);
2945 	}
2946 }
2947 
2948 static void
2949 __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2950 {
2951 	struct bfa_fcport_ln_s *ln = cbarg;
2952 
2953 	if (complete)
2954 		ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
2955 	else
2956 		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2957 }
2958 
2959 /*
2960  * Send SCN notification to upper layers.
2961  * trunk - false if caller is fcport to ignore fcport event in trunked mode
2962  */
2963 static void
2964 bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
2965 	bfa_boolean_t trunk)
2966 {
2967 	if (fcport->cfg.trunked && !trunk)
2968 		return;
2969 
2970 	switch (event) {
2971 	case BFA_PORT_LINKUP:
2972 		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
2973 		break;
2974 	case BFA_PORT_LINKDOWN:
2975 		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
2976 		break;
2977 	default:
2978 		WARN_ON(1);
2979 	}
2980 }
2981 
2982 static void
2983 bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
2984 {
2985 	struct bfa_fcport_s *fcport = ln->fcport;
2986 
2987 	if (fcport->bfa->fcs) {
2988 		fcport->event_cbfn(fcport->event_cbarg, event);
2989 		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2990 	} else {
2991 		ln->ln_event = event;
2992 		bfa_cb_queue(fcport->bfa, &ln->ln_qe,
2993 			__bfa_cb_fcport_event, ln);
2994 	}
2995 }
2996 
2997 #define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
2998 							BFA_CACHELINE_SZ))
2999 
3000 void
3001 bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
3002 		   struct bfa_s *bfa)
3003 {
3004 	struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa);
3005 
3006 	bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ);
3007 }
3008 
3009 static void
3010 bfa_fcport_qresume(void *cbarg)
3011 {
3012 	struct bfa_fcport_s *fcport = cbarg;
3013 
3014 	bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
3015 }
3016 
3017 static void
3018 bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
3019 {
3020 	struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma;
3021 
3022 	fcport->stats_kva = bfa_mem_dma_virt(fcport_dma);
3023 	fcport->stats_pa  = bfa_mem_dma_phys(fcport_dma);
3024 	fcport->stats = (union bfa_fcport_stats_u *)
3025 				bfa_mem_dma_virt(fcport_dma);
3026 }
3027 
3028 /*
3029  * Memory initialization.
3030  */
3031 void
3032 bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
3033 		struct bfa_pcidev_s *pcidev)
3034 {
3035 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3036 	struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
3037 	struct bfa_fcport_ln_s *ln = &fcport->ln;
3038 
3039 	fcport->bfa = bfa;
3040 	ln->fcport = fcport;
3041 
3042 	bfa_fcport_mem_claim(fcport);
3043 
3044 	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
3045 	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
3046 
3047 	/*
3048 	 * initialize time stamp for stats reset
3049 	 */
3050 	fcport->stats_reset_time = ktime_get_seconds();
3051 	fcport->stats_dma_ready = BFA_FALSE;
3052 
3053 	/*
3054 	 * initialize and set default configuration
3055 	 */
3056 	port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
3057 	port_cfg->speed = BFA_PORT_SPEED_AUTO;
3058 	port_cfg->trunked = BFA_FALSE;
3059 	port_cfg->maxfrsize = 0;
3060 
3061 	port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
3062 	port_cfg->qos_bw.high = BFA_QOS_BW_HIGH;
3063 	port_cfg->qos_bw.med = BFA_QOS_BW_MED;
3064 	port_cfg->qos_bw.low = BFA_QOS_BW_LOW;
3065 
3066 	fcport->fec_state = BFA_FEC_OFFLINE;
3067 
3068 	INIT_LIST_HEAD(&fcport->stats_pending_q);
3069 	INIT_LIST_HEAD(&fcport->statsclr_pending_q);
3070 
3071 	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
3072 }
3073 
3074 void
3075 bfa_fcport_start(struct bfa_s *bfa)
3076 {
3077 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
3078 }
3079 
3080 /*
3081  * Called when IOC failure is detected.
3082  */
3083 void
3084 bfa_fcport_iocdisable(struct bfa_s *bfa)
3085 {
3086 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3087 
3088 	bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
3089 	bfa_trunk_iocdisable(bfa);
3090 }
3091 
3092 /*
3093  * Update loop info in fcport for SCN online
3094  */
3095 static void
3096 bfa_fcport_update_loop_info(struct bfa_fcport_s *fcport,
3097 			struct bfa_fcport_loop_info_s *loop_info)
3098 {
3099 	fcport->myalpa = loop_info->myalpa;
3100 	fcport->alpabm_valid =
3101 			loop_info->alpabm_val;
3102 	memcpy(fcport->alpabm.alpa_bm,
3103 			loop_info->alpabm.alpa_bm,
3104 			sizeof(struct fc_alpabm_s));
3105 }
3106 
/*
 * Cache the link attributes carried in the firmware link-state event
 * (speed, topology, QoS, BB credit recovery, FEC, FCoE VLAN).
 */
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

	fcport->speed = pevent->link_state.speed;
	fcport->topology = pevent->link_state.topology;

	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) {
		/* loop topology carries its own attribute layout */
		bfa_fcport_update_loop_info(fcport,
				&pevent->link_state.attr.loop_info);
		return;
	}

	/* QoS Details */
	fcport->qos_attr = pevent->link_state.qos_attr;
	fcport->qos_vc_attr = pevent->link_state.attr.vc_fcf.qos_vc_attr;

	if (fcport->cfg.bb_cr_enabled)
		fcport->bbcr_attr = pevent->link_state.attr.bbcr_attr;

	fcport->fec_state = pevent->link_state.fec_state;

	/*
	 * update trunk state if applicable
	 */
	if (!fcport->cfg.trunked)
		trunk->attr.state = BFA_TRUNK_DISABLED;

	/* update FCoE specific */
	fcport->fcoe_vlan =
		be16_to_cpu(pevent->link_state.attr.vc_fcf.fcf.vlan);

	bfa_trc(fcport->bfa, fcport->speed);
	bfa_trc(fcport->bfa, fcport->topology);
}
3144 
3145 static void
3146 bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
3147 {
3148 	fcport->speed = BFA_PORT_SPEED_UNKNOWN;
3149 	fcport->topology = BFA_PORT_TOPOLOGY_NONE;
3150 	fcport->fec_state = BFA_FEC_OFFLINE;
3151 }
3152 
3153 /*
3154  * Send port enable message to firmware.
3155  */
3156 static bfa_boolean_t
3157 bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
3158 {
3159 	struct bfi_fcport_enable_req_s *m;
3160 
3161 	/*
3162 	 * Increment message tag before queue check, so that responses to old
3163 	 * requests are discarded.
3164 	 */
3165 	fcport->msgtag++;
3166 
3167 	/*
3168 	 * check for room in queue to send request now
3169 	 */
3170 	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3171 	if (!m) {
3172 		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3173 							&fcport->reqq_wait);
3174 		return BFA_FALSE;
3175 	}
3176 
3177 	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
3178 			bfa_fn_lpu(fcport->bfa));
3179 	m->nwwn = fcport->nwwn;
3180 	m->pwwn = fcport->pwwn;
3181 	m->port_cfg = fcport->cfg;
3182 	m->msgtag = fcport->msgtag;
3183 	m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
3184 	 m->use_flash_cfg = fcport->use_flash_cfg;
3185 	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
3186 	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
3187 	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
3188 
3189 	/*
3190 	 * queue I/O message to firmware
3191 	 */
3192 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
3193 	return BFA_TRUE;
3194 }
3195 
3196 /*
3197  * Send port disable message to firmware.
3198  */
3199 static	bfa_boolean_t
3200 bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
3201 {
3202 	struct bfi_fcport_req_s *m;
3203 
3204 	/*
3205 	 * Increment message tag before queue check, so that responses to old
3206 	 * requests are discarded.
3207 	 */
3208 	fcport->msgtag++;
3209 
3210 	/*
3211 	 * check for room in queue to send request now
3212 	 */
3213 	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3214 	if (!m) {
3215 		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3216 							&fcport->reqq_wait);
3217 		return BFA_FALSE;
3218 	}
3219 
3220 	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
3221 			bfa_fn_lpu(fcport->bfa));
3222 	m->msgtag = fcport->msgtag;
3223 
3224 	/*
3225 	 * queue I/O message to firmware
3226 	 */
3227 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
3228 
3229 	return BFA_TRUE;
3230 }
3231 
3232 static void
3233 bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
3234 {
3235 	fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
3236 	fcport->nwwn = fcport->bfa->ioc.attr->nwwn;
3237 
3238 	bfa_trc(fcport->bfa, fcport->pwwn);
3239 	bfa_trc(fcport->bfa, fcport->nwwn);
3240 }
3241 
3242 static void
3243 bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3244 	struct bfa_qos_stats_s *s)
3245 {
3246 	u32	*dip = (u32 *) d;
3247 	__be32	*sip = (__be32 *) s;
3248 	int		i;
3249 
3250 	/* Now swap the 32 bit fields */
3251 	for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
3252 		dip[i] = be32_to_cpu(sip[i]);
3253 }
3254 
/*
 * Convert the FCoE statistics from firmware byte order to host order.
 * Counters are stored as pairs of 32-bit words; on little-endian hosts
 * the two words of each pair must also be exchanged (see the #else
 * branch), which is why the loop steps by two.
 */
static void
bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
	struct bfa_fcoe_stats_s *s)
{
	u32	*dip = (u32 *) d;
	__be32	*sip = (__be32 *) s;
	int		i;

	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
	     i = i + 2) {
#ifdef __BIG_ENDIAN
		dip[i] = be32_to_cpu(sip[i]);
		dip[i + 1] = be32_to_cpu(sip[i + 1]);
#else
		/* swap word order within each 64-bit pair as well */
		dip[i] = be32_to_cpu(sip[i + 1]);
		dip[i + 1] = be32_to_cpu(sip[i]);
#endif
	}
}
3274 
/*
 * Completion handler for a statistics-get request: on completion,
 * dequeue every waiter on stats_pending_q and hand it the byte-swapped
 * statistics (FC QoS or FCoE, depending on IOC mode); on cancellation,
 * just flush the pending queue.
 */
static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg;
	struct bfa_cb_pending_q_s *cb;
	struct list_head *qe, *qen;
	union bfa_fcport_stats_u *ret;

	if (complete) {
		time64_t time = ktime_get_seconds();

		list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
			bfa_q_deq(&fcport->stats_pending_q, &qe);
			cb = (struct bfa_cb_pending_q_s *)qe;
			if (fcport->stats_status == BFA_STATUS_OK) {
				ret = (union bfa_fcport_stats_u *)cb->data;
				/* Swap FC QoS or FCoE stats */
				if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
					bfa_fcport_qos_stats_swap(&ret->fcqos,
							&fcport->stats->fcqos);
				else {
					bfa_fcport_fcoe_stats_swap(&ret->fcoe,
							&fcport->stats->fcoe);
					/* seconds since last stats reset */
					ret->fcoe.secs_reset =
						time - fcport->stats_reset_time;
				}
			}
			/* complete this waiter with the overall status */
			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
					fcport->stats_status);
		}
		fcport->stats_status = BFA_STATUS_OK;
	} else {
		INIT_LIST_HEAD(&fcport->stats_pending_q);
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3311 
3312 static void
3313 bfa_fcport_stats_get_timeout(void *cbarg)
3314 {
3315 	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3316 
3317 	bfa_trc(fcport->bfa, fcport->stats_qfull);
3318 
3319 	if (fcport->stats_qfull) {
3320 		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3321 		fcport->stats_qfull = BFA_FALSE;
3322 	}
3323 
3324 	fcport->stats_status = BFA_STATUS_ETIMER;
3325 	__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
3326 }
3327 
3328 static void
3329 bfa_fcport_send_stats_get(void *cbarg)
3330 {
3331 	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3332 	struct bfi_fcport_req_s *msg;
3333 
3334 	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3335 
3336 	if (!msg) {
3337 		fcport->stats_qfull = BFA_TRUE;
3338 		bfa_reqq_winit(&fcport->stats_reqq_wait,
3339 				bfa_fcport_send_stats_get, fcport);
3340 		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3341 				&fcport->stats_reqq_wait);
3342 		return;
3343 	}
3344 	fcport->stats_qfull = BFA_FALSE;
3345 
3346 	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3347 	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
3348 			bfa_fn_lpu(fcport->bfa));
3349 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
3350 }
3351 
/*
 * Completion callback for a port statistics clear.
 *
 * @cbarg:	fcport instance
 * @complete:	BFA_TRUE  - complete every requester queued on
 *			    statsclr_pending_q with the current status
 *		BFA_FALSE - discard the pending queue and reset status
 */
static void
__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfa_cb_pending_q_s *cb;
	struct list_head *qe, *qen;

	if (complete) {
		/*
		 * re-initialize time stamp for stats reset
		 */
		fcport->stats_reset_time = ktime_get_seconds();
		/* Drain the queue, completing each waiter */
		list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
			bfa_q_deq(&fcport->statsclr_pending_q, &qe);
			cb = (struct bfa_cb_pending_q_s *)qe;
			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
						fcport->stats_status);
		}
		fcport->stats_status = BFA_STATUS_OK;
	} else {
		/* Not completing; reinitialize queue and status */
		INIT_LIST_HEAD(&fcport->statsclr_pending_q);
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3376 
3377 static void
3378 bfa_fcport_stats_clr_timeout(void *cbarg)
3379 {
3380 	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3381 
3382 	bfa_trc(fcport->bfa, fcport->stats_qfull);
3383 
3384 	if (fcport->stats_qfull) {
3385 		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3386 		fcport->stats_qfull = BFA_FALSE;
3387 	}
3388 
3389 	fcport->stats_status = BFA_STATUS_ETIMER;
3390 	__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
3391 }
3392 
3393 static void
3394 bfa_fcport_send_stats_clear(void *cbarg)
3395 {
3396 	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3397 	struct bfi_fcport_req_s *msg;
3398 
3399 	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3400 
3401 	if (!msg) {
3402 		fcport->stats_qfull = BFA_TRUE;
3403 		bfa_reqq_winit(&fcport->stats_reqq_wait,
3404 				bfa_fcport_send_stats_clear, fcport);
3405 		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3406 						&fcport->stats_reqq_wait);
3407 		return;
3408 	}
3409 	fcport->stats_qfull = BFA_FALSE;
3410 
3411 	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3412 	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
3413 			bfa_fn_lpu(fcport->bfa));
3414 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
3415 }
3416 
3417 /*
3418  * Handle trunk SCN event from firmware.
3419  */
3420 static void
3421 bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
3422 {
3423 	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
3424 	struct bfi_fcport_trunk_link_s *tlink;
3425 	struct bfa_trunk_link_attr_s *lattr;
3426 	enum bfa_trunk_state state_prev;
3427 	int i;
3428 	int link_bm = 0;
3429 
3430 	bfa_trc(fcport->bfa, fcport->cfg.trunked);
3431 	WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE &&
3432 		   scn->trunk_state != BFA_TRUNK_OFFLINE);
3433 
3434 	bfa_trc(fcport->bfa, trunk->attr.state);
3435 	bfa_trc(fcport->bfa, scn->trunk_state);
3436 	bfa_trc(fcport->bfa, scn->trunk_speed);
3437 
3438 	/*
3439 	 * Save off new state for trunk attribute query
3440 	 */
3441 	state_prev = trunk->attr.state;
3442 	if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
3443 		trunk->attr.state = scn->trunk_state;
3444 	trunk->attr.speed = scn->trunk_speed;
3445 	for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3446 		lattr = &trunk->attr.link_attr[i];
3447 		tlink = &scn->tlink[i];
3448 
3449 		lattr->link_state = tlink->state;
3450 		lattr->trunk_wwn  = tlink->trunk_wwn;
3451 		lattr->fctl	  = tlink->fctl;
3452 		lattr->speed	  = tlink->speed;
3453 		lattr->deskew	  = be32_to_cpu(tlink->deskew);
3454 
3455 		if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
3456 			fcport->speed	 = tlink->speed;
3457 			fcport->topology = BFA_PORT_TOPOLOGY_P2P;
3458 			link_bm |= 1 << i;
3459 		}
3460 
3461 		bfa_trc(fcport->bfa, lattr->link_state);
3462 		bfa_trc(fcport->bfa, lattr->trunk_wwn);
3463 		bfa_trc(fcport->bfa, lattr->fctl);
3464 		bfa_trc(fcport->bfa, lattr->speed);
3465 		bfa_trc(fcport->bfa, lattr->deskew);
3466 	}
3467 
3468 	switch (link_bm) {
3469 	case 3:
3470 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3471 			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
3472 		break;
3473 	case 2:
3474 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3475 			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
3476 		break;
3477 	case 1:
3478 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3479 			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
3480 		break;
3481 	default:
3482 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3483 			BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
3484 	}
3485 
3486 	/*
3487 	 * Notify upper layers if trunk state changed.
3488 	 */
3489 	if ((state_prev != trunk->attr.state) ||
3490 		(scn->trunk_state == BFA_TRUNK_OFFLINE)) {
3491 		bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
3492 			BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
3493 	}
3494 }
3495 
3496 static void
3497 bfa_trunk_iocdisable(struct bfa_s *bfa)
3498 {
3499 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3500 	int i = 0;
3501 
3502 	/*
3503 	 * In trunked mode, notify upper layers that link is down
3504 	 */
3505 	if (fcport->cfg.trunked) {
3506 		if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
3507 			bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);
3508 
3509 		fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
3510 		fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
3511 		for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3512 			fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
3513 			fcport->trunk.attr.link_attr[i].fctl =
3514 						BFA_TRUNK_LINK_FCTL_NORMAL;
3515 			fcport->trunk.attr.link_attr[i].link_state =
3516 						BFA_TRUNK_LINK_STATE_DN_LINKDN;
3517 			fcport->trunk.attr.link_attr[i].speed =
3518 						BFA_PORT_SPEED_UNKNOWN;
3519 			fcport->trunk.attr.link_attr[i].deskew = 0;
3520 		}
3521 	}
3522 }
3523 
3524 /*
3525  * Called to initialize port attributes
3526  */
3527 void
3528 bfa_fcport_init(struct bfa_s *bfa)
3529 {
3530 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3531 
3532 	/*
3533 	 * Initialize port attributes from IOC hardware data.
3534 	 */
3535 	bfa_fcport_set_wwns(fcport);
3536 	if (fcport->cfg.maxfrsize == 0)
3537 		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
3538 	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
3539 	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
3540 
3541 	if (bfa_fcport_is_pbcdisabled(bfa))
3542 		bfa->modules.port.pbc_disabled = BFA_TRUE;
3543 
3544 	WARN_ON(!fcport->cfg.maxfrsize);
3545 	WARN_ON(!fcport->cfg.rx_bbcredit);
3546 	WARN_ON(!fcport->speed_sup);
3547 }
3548 
3549 /*
3550  * Firmware message handler.
3551  */
3552 void
3553 bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3554 {
3555 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3556 	union bfi_fcport_i2h_msg_u i2hmsg;
3557 
3558 	i2hmsg.msg = msg;
3559 	fcport->event_arg.i2hmsg = i2hmsg;
3560 
3561 	bfa_trc(bfa, msg->mhdr.msg_id);
3562 	bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));
3563 
3564 	switch (msg->mhdr.msg_id) {
3565 	case BFI_FCPORT_I2H_ENABLE_RSP:
3566 		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {
3567 
3568 			fcport->stats_dma_ready = BFA_TRUE;
3569 			if (fcport->use_flash_cfg) {
3570 				fcport->cfg = i2hmsg.penable_rsp->port_cfg;
3571 				fcport->cfg.maxfrsize =
3572 					cpu_to_be16(fcport->cfg.maxfrsize);
3573 				fcport->cfg.path_tov =
3574 					cpu_to_be16(fcport->cfg.path_tov);
3575 				fcport->cfg.q_depth =
3576 					cpu_to_be16(fcport->cfg.q_depth);
3577 
3578 				if (fcport->cfg.trunked)
3579 					fcport->trunk.attr.state =
3580 						BFA_TRUNK_OFFLINE;
3581 				else
3582 					fcport->trunk.attr.state =
3583 						BFA_TRUNK_DISABLED;
3584 				fcport->qos_attr.qos_bw =
3585 					i2hmsg.penable_rsp->port_cfg.qos_bw;
3586 				fcport->use_flash_cfg = BFA_FALSE;
3587 			}
3588 
3589 			if (fcport->cfg.qos_enabled)
3590 				fcport->qos_attr.state = BFA_QOS_OFFLINE;
3591 			else
3592 				fcport->qos_attr.state = BFA_QOS_DISABLED;
3593 
3594 			fcport->qos_attr.qos_bw_op =
3595 					i2hmsg.penable_rsp->port_cfg.qos_bw;
3596 
3597 			if (fcport->cfg.bb_cr_enabled)
3598 				fcport->bbcr_attr.state = BFA_BBCR_OFFLINE;
3599 			else
3600 				fcport->bbcr_attr.state = BFA_BBCR_DISABLED;
3601 
3602 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3603 		}
3604 		break;
3605 
3606 	case BFI_FCPORT_I2H_DISABLE_RSP:
3607 		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
3608 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3609 		break;
3610 
3611 	case BFI_FCPORT_I2H_EVENT:
3612 		if (fcport->cfg.bb_cr_enabled)
3613 			fcport->bbcr_attr.state = BFA_BBCR_OFFLINE;
3614 		else
3615 			fcport->bbcr_attr.state = BFA_BBCR_DISABLED;
3616 
3617 		if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
3618 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
3619 		else {
3620 			if (i2hmsg.event->link_state.linkstate_rsn ==
3621 			    BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG)
3622 				bfa_sm_send_event(fcport,
3623 						  BFA_FCPORT_SM_FAA_MISCONFIG);
3624 			else
3625 				bfa_sm_send_event(fcport,
3626 						  BFA_FCPORT_SM_LINKDOWN);
3627 		}
3628 		fcport->qos_attr.qos_bw_op =
3629 				i2hmsg.event->link_state.qos_attr.qos_bw_op;
3630 		break;
3631 
3632 	case BFI_FCPORT_I2H_TRUNK_SCN:
3633 		bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
3634 		break;
3635 
3636 	case BFI_FCPORT_I2H_STATS_GET_RSP:
3637 		/*
3638 		 * check for timer pop before processing the rsp
3639 		 */
3640 		if (list_empty(&fcport->stats_pending_q) ||
3641 		    (fcport->stats_status == BFA_STATUS_ETIMER))
3642 			break;
3643 
3644 		bfa_timer_stop(&fcport->timer);
3645 		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
3646 		__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
3647 		break;
3648 
3649 	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
3650 		/*
3651 		 * check for timer pop before processing the rsp
3652 		 */
3653 		if (list_empty(&fcport->statsclr_pending_q) ||
3654 		    (fcport->stats_status == BFA_STATUS_ETIMER))
3655 			break;
3656 
3657 		bfa_timer_stop(&fcport->timer);
3658 		fcport->stats_status = BFA_STATUS_OK;
3659 		__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
3660 		break;
3661 
3662 	case BFI_FCPORT_I2H_ENABLE_AEN:
3663 		bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
3664 		break;
3665 
3666 	case BFI_FCPORT_I2H_DISABLE_AEN:
3667 		bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
3668 		break;
3669 
3670 	default:
3671 		WARN_ON(1);
3672 	break;
3673 	}
3674 }
3675 
3676 /*
3677  * Registered callback for port events.
3678  */
3679 void
3680 bfa_fcport_event_register(struct bfa_s *bfa,
3681 				void (*cbfn) (void *cbarg,
3682 				enum bfa_port_linkstate event),
3683 				void *cbarg)
3684 {
3685 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3686 
3687 	fcport->event_cbfn = cbfn;
3688 	fcport->event_cbarg = cbarg;
3689 }
3690 
3691 bfa_status_t
3692 bfa_fcport_enable(struct bfa_s *bfa)
3693 {
3694 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3695 
3696 	if (bfa_fcport_is_pbcdisabled(bfa))
3697 		return BFA_STATUS_PBC;
3698 
3699 	if (bfa_ioc_is_disabled(&bfa->ioc))
3700 		return BFA_STATUS_IOC_DISABLED;
3701 
3702 	if (fcport->diag_busy)
3703 		return BFA_STATUS_DIAG_BUSY;
3704 
3705 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
3706 	return BFA_STATUS_OK;
3707 }
3708 
3709 bfa_status_t
3710 bfa_fcport_disable(struct bfa_s *bfa)
3711 {
3712 	if (bfa_fcport_is_pbcdisabled(bfa))
3713 		return BFA_STATUS_PBC;
3714 
3715 	if (bfa_ioc_is_disabled(&bfa->ioc))
3716 		return BFA_STATUS_IOC_DISABLED;
3717 
3718 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
3719 	return BFA_STATUS_OK;
3720 }
3721 
3722 /* If PBC is disabled on port, return error */
3723 bfa_status_t
3724 bfa_fcport_is_pbcdisabled(struct bfa_s *bfa)
3725 {
3726 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3727 	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
3728 	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
3729 
3730 	if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
3731 		bfa_trc(bfa, fcport->pwwn);
3732 		return BFA_STATUS_PBC;
3733 	}
3734 	return BFA_STATUS_OK;
3735 }
3736 
3737 /*
3738  * Configure port speed.
3739  */
3740 bfa_status_t
3741 bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3742 {
3743 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3744 
3745 	bfa_trc(bfa, speed);
3746 
3747 	if (fcport->cfg.trunked == BFA_TRUE)
3748 		return BFA_STATUS_TRUNK_ENABLED;
3749 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
3750 			(speed == BFA_PORT_SPEED_16GBPS))
3751 		return BFA_STATUS_UNSUPP_SPEED;
3752 	if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3753 		bfa_trc(bfa, fcport->speed_sup);
3754 		return BFA_STATUS_UNSUPP_SPEED;
3755 	}
3756 
3757 	/* Port speed entered needs to be checked */
3758 	if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
3759 		/* For CT2, 1G is not supported */
3760 		if ((speed == BFA_PORT_SPEED_1GBPS) &&
3761 		    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
3762 			return BFA_STATUS_UNSUPP_SPEED;
3763 
3764 		/* Already checked for Auto Speed and Max Speed supp */
3765 		if (!(speed == BFA_PORT_SPEED_1GBPS ||
3766 		      speed == BFA_PORT_SPEED_2GBPS ||
3767 		      speed == BFA_PORT_SPEED_4GBPS ||
3768 		      speed == BFA_PORT_SPEED_8GBPS ||
3769 		      speed == BFA_PORT_SPEED_16GBPS ||
3770 		      speed == BFA_PORT_SPEED_AUTO))
3771 			return BFA_STATUS_UNSUPP_SPEED;
3772 	} else {
3773 		if (speed != BFA_PORT_SPEED_10GBPS)
3774 			return BFA_STATUS_UNSUPP_SPEED;
3775 	}
3776 
3777 	fcport->cfg.speed = speed;
3778 
3779 	return BFA_STATUS_OK;
3780 }
3781 
3782 /*
3783  * Get current speed.
3784  */
3785 enum bfa_port_speed
3786 bfa_fcport_get_speed(struct bfa_s *bfa)
3787 {
3788 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3789 
3790 	return fcport->speed;
3791 }
3792 
3793 /*
3794  * Configure port topology.
3795  */
3796 bfa_status_t
3797 bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3798 {
3799 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3800 
3801 	bfa_trc(bfa, topology);
3802 	bfa_trc(bfa, fcport->cfg.topology);
3803 
3804 	switch (topology) {
3805 	case BFA_PORT_TOPOLOGY_P2P:
3806 		break;
3807 
3808 	case BFA_PORT_TOPOLOGY_LOOP:
3809 		if ((bfa_fcport_is_qos_enabled(bfa) != BFA_FALSE) ||
3810 			(fcport->qos_attr.state != BFA_QOS_DISABLED))
3811 			return BFA_STATUS_ERROR_QOS_ENABLED;
3812 		if (fcport->cfg.ratelimit != BFA_FALSE)
3813 			return BFA_STATUS_ERROR_TRL_ENABLED;
3814 		if ((bfa_fcport_is_trunk_enabled(bfa) != BFA_FALSE) ||
3815 			(fcport->trunk.attr.state != BFA_TRUNK_DISABLED))
3816 			return BFA_STATUS_ERROR_TRUNK_ENABLED;
3817 		if ((bfa_fcport_get_speed(bfa) == BFA_PORT_SPEED_16GBPS) ||
3818 			(fcport->cfg.speed == BFA_PORT_SPEED_16GBPS))
3819 			return BFA_STATUS_UNSUPP_SPEED;
3820 		if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type))
3821 			return BFA_STATUS_LOOP_UNSUPP_MEZZ;
3822 		if (bfa_fcport_is_dport(bfa) != BFA_FALSE)
3823 			return BFA_STATUS_DPORT_ERR;
3824 		if (bfa_fcport_is_ddport(bfa) != BFA_FALSE)
3825 			return BFA_STATUS_DPORT_ERR;
3826 		break;
3827 
3828 	case BFA_PORT_TOPOLOGY_AUTO:
3829 		break;
3830 
3831 	default:
3832 		return BFA_STATUS_EINVAL;
3833 	}
3834 
3835 	fcport->cfg.topology = topology;
3836 	return BFA_STATUS_OK;
3837 }
3838 
3839 /*
3840  * Get current topology.
3841  */
3842 enum bfa_port_topology
3843 bfa_fcport_get_topology(struct bfa_s *bfa)
3844 {
3845 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3846 
3847 	return fcport->topology;
3848 }
3849 
3850 /**
3851  * Get config topology.
3852  */
3853 enum bfa_port_topology
3854 bfa_fcport_get_cfg_topology(struct bfa_s *bfa)
3855 {
3856 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3857 
3858 	return fcport->cfg.topology;
3859 }
3860 
3861 bfa_status_t
3862 bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3863 {
3864 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3865 
3866 	bfa_trc(bfa, alpa);
3867 	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3868 	bfa_trc(bfa, fcport->cfg.hardalpa);
3869 
3870 	fcport->cfg.cfg_hardalpa = BFA_TRUE;
3871 	fcport->cfg.hardalpa = alpa;
3872 
3873 	return BFA_STATUS_OK;
3874 }
3875 
3876 bfa_status_t
3877 bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3878 {
3879 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3880 
3881 	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3882 	bfa_trc(bfa, fcport->cfg.hardalpa);
3883 
3884 	fcport->cfg.cfg_hardalpa = BFA_FALSE;
3885 	return BFA_STATUS_OK;
3886 }
3887 
3888 bfa_boolean_t
3889 bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3890 {
3891 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3892 
3893 	*alpa = fcport->cfg.hardalpa;
3894 	return fcport->cfg.cfg_hardalpa;
3895 }
3896 
3897 u8
3898 bfa_fcport_get_myalpa(struct bfa_s *bfa)
3899 {
3900 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3901 
3902 	return fcport->myalpa;
3903 }
3904 
3905 bfa_status_t
3906 bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3907 {
3908 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3909 
3910 	bfa_trc(bfa, maxfrsize);
3911 	bfa_trc(bfa, fcport->cfg.maxfrsize);
3912 
3913 	/* with in range */
3914 	if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
3915 		return BFA_STATUS_INVLD_DFSZ;
3916 
3917 	/* power of 2, if not the max frame size of 2112 */
3918 	if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
3919 		return BFA_STATUS_INVLD_DFSZ;
3920 
3921 	fcport->cfg.maxfrsize = maxfrsize;
3922 	return BFA_STATUS_OK;
3923 }
3924 
3925 u16
3926 bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3927 {
3928 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3929 
3930 	return fcport->cfg.maxfrsize;
3931 }
3932 
3933 u8
3934 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3935 {
3936 	if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP)
3937 		return (BFA_FCPORT_MOD(bfa))->cfg.rx_bbcredit;
3938 
3939 	else
3940 		return 0;
3941 }
3942 
3943 void
3944 bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
3945 {
3946 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3947 
3948 	fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
3949 }
3950 
3951 /*
3952  * Get port attributes.
3953  */
3954 
3955 wwn_t
3956 bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3957 {
3958 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3959 	if (node)
3960 		return fcport->nwwn;
3961 	else
3962 		return fcport->pwwn;
3963 }
3964 
/*
 * Fill @attr with a snapshot of the port's configuration and
 * operational state (WWNs, speed, topology, beacon, port state).
 */
void
bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	memset(attr, 0, sizeof(struct bfa_port_attr_s));

	attr->nwwn = fcport->nwwn;
	attr->pwwn = fcport->pwwn;

	/* Factory-programmed WWNs from manufacturing data */
	attr->factorypwwn =  bfa->ioc.attr->mfg_pwwn;
	attr->factorynwwn =  bfa->ioc.attr->mfg_nwwn;

	memcpy(&attr->pport_cfg, &fcport->cfg,
		sizeof(struct bfa_port_cfg_s));
	/* speed attributes */
	attr->pport_cfg.speed = fcport->cfg.speed;
	attr->speed_supported = fcport->speed_sup;
	attr->speed = fcport->speed;
	attr->cos_supported = FC_CLASS_3;

	/* topology attributes */
	attr->pport_cfg.topology = fcport->cfg.topology;
	attr->topology = fcport->topology;
	attr->pport_cfg.trunked = fcport->cfg.trunked;

	/* beacon attributes */
	attr->beacon = fcport->beacon;
	attr->link_e2e_beacon = fcport->link_e2e_beacon;

	attr->pport_cfg.path_tov  = bfa_fcpim_path_tov_get(bfa);
	attr->pport_cfg.q_depth  = bfa_fcpim_qdepth_get(bfa);
	/* Derive the reported port state from the state machine */
	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);

	attr->fec_state = fcport->fec_state;

	/* PBC Disabled State */
	if (bfa_fcport_is_pbcdisabled(bfa))
		attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
	else {
		/* IOC disable / firmware mismatch override the SM state */
		if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
			attr->port_state = BFA_PORT_ST_IOCDIS;
		else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
			attr->port_state = BFA_PORT_ST_FWMISMATCH;
	}

	/* FCoE vlan */
	attr->fcoe_vlan = fcport->fcoe_vlan;
}
4014 
4015 #define BFA_FCPORT_STATS_TOV	1000
4016 
4017 /*
4018  * Fetch port statistics (FCQoS or FCoE).
4019  */
4020 bfa_status_t
4021 bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
4022 {
4023 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4024 
4025 	if (!bfa_iocfc_is_operational(bfa) ||
4026 	    !fcport->stats_dma_ready)
4027 		return BFA_STATUS_IOC_NON_OP;
4028 
4029 	if (!list_empty(&fcport->statsclr_pending_q))
4030 		return BFA_STATUS_DEVBUSY;
4031 
4032 	if (list_empty(&fcport->stats_pending_q)) {
4033 		list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
4034 		bfa_fcport_send_stats_get(fcport);
4035 		bfa_timer_start(bfa, &fcport->timer,
4036 				bfa_fcport_stats_get_timeout,
4037 				fcport, BFA_FCPORT_STATS_TOV);
4038 	} else
4039 		list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
4040 
4041 	return BFA_STATUS_OK;
4042 }
4043 
4044 /*
4045  * Reset port statistics (FCQoS or FCoE).
4046  */
4047 bfa_status_t
4048 bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
4049 {
4050 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4051 
4052 	if (!bfa_iocfc_is_operational(bfa) ||
4053 	    !fcport->stats_dma_ready)
4054 		return BFA_STATUS_IOC_NON_OP;
4055 
4056 	if (!list_empty(&fcport->stats_pending_q))
4057 		return BFA_STATUS_DEVBUSY;
4058 
4059 	if (list_empty(&fcport->statsclr_pending_q)) {
4060 		list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
4061 		bfa_fcport_send_stats_clear(fcport);
4062 		bfa_timer_start(bfa, &fcport->timer,
4063 				bfa_fcport_stats_clr_timeout,
4064 				fcport, BFA_FCPORT_STATS_TOV);
4065 	} else
4066 		list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
4067 
4068 	return BFA_STATUS_OK;
4069 }
4070 
4071 /*
4072  * Fetch port attributes.
4073  */
4074 bfa_boolean_t
4075 bfa_fcport_is_disabled(struct bfa_s *bfa)
4076 {
4077 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4078 
4079 	return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4080 		BFA_PORT_ST_DISABLED;
4081 
4082 }
4083 
4084 bfa_boolean_t
4085 bfa_fcport_is_dport(struct bfa_s *bfa)
4086 {
4087 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4088 
4089 	return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4090 		BFA_PORT_ST_DPORT);
4091 }
4092 
4093 bfa_boolean_t
4094 bfa_fcport_is_ddport(struct bfa_s *bfa)
4095 {
4096 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4097 
4098 	return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4099 		BFA_PORT_ST_DDPORT);
4100 }
4101 
4102 bfa_status_t
4103 bfa_fcport_set_qos_bw(struct bfa_s *bfa, struct bfa_qos_bw_s *qos_bw)
4104 {
4105 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4106 	enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
4107 
4108 	bfa_trc(bfa, ioc_type);
4109 
4110 	if ((qos_bw->high == 0) || (qos_bw->med == 0) || (qos_bw->low == 0))
4111 		return BFA_STATUS_QOS_BW_INVALID;
4112 
4113 	if ((qos_bw->high + qos_bw->med + qos_bw->low) != 100)
4114 		return BFA_STATUS_QOS_BW_INVALID;
4115 
4116 	if ((qos_bw->med > qos_bw->high) || (qos_bw->low > qos_bw->med) ||
4117 	    (qos_bw->low > qos_bw->high))
4118 		return BFA_STATUS_QOS_BW_INVALID;
4119 
4120 	if ((ioc_type == BFA_IOC_TYPE_FC) &&
4121 	    (fcport->cfg.topology != BFA_PORT_TOPOLOGY_LOOP))
4122 		fcport->cfg.qos_bw = *qos_bw;
4123 
4124 	return BFA_STATUS_OK;
4125 }
4126 
4127 bfa_boolean_t
4128 bfa_fcport_is_ratelim(struct bfa_s *bfa)
4129 {
4130 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4131 
4132 	return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
4133 
4134 }
4135 
4136 /*
4137  *	Enable/Disable FAA feature in port config
4138  */
4139 void
4140 bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state)
4141 {
4142 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4143 
4144 	bfa_trc(bfa, state);
4145 	fcport->cfg.faa_state = state;
4146 }
4147 
4148 /*
4149  * Get default minimum ratelim speed
4150  */
4151 enum bfa_port_speed
4152 bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
4153 {
4154 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4155 
4156 	bfa_trc(bfa, fcport->cfg.trl_def_speed);
4157 	return fcport->cfg.trl_def_speed;
4158 
4159 }
4160 
4161 void
4162 bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
4163 		  bfa_boolean_t link_e2e_beacon)
4164 {
4165 	struct bfa_s *bfa = dev;
4166 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4167 
4168 	bfa_trc(bfa, beacon);
4169 	bfa_trc(bfa, link_e2e_beacon);
4170 	bfa_trc(bfa, fcport->beacon);
4171 	bfa_trc(bfa, fcport->link_e2e_beacon);
4172 
4173 	fcport->beacon = beacon;
4174 	fcport->link_e2e_beacon = link_e2e_beacon;
4175 }
4176 
4177 bfa_boolean_t
4178 bfa_fcport_is_linkup(struct bfa_s *bfa)
4179 {
4180 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4181 
4182 	return	(!fcport->cfg.trunked &&
4183 		 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
4184 		(fcport->cfg.trunked &&
4185 		 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
4186 }
4187 
4188 bfa_boolean_t
4189 bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
4190 {
4191 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4192 
4193 	return fcport->cfg.qos_enabled;
4194 }
4195 
4196 bfa_boolean_t
4197 bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
4198 {
4199 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4200 
4201 	return fcport->cfg.trunked;
4202 }
4203 
/*
 * Enable or disable BB credit recovery.
 *
 * @on_off:	BFA_TRUE to enable, BFA_FALSE to disable
 * @bb_scn:	SCN count to use when enabling; out-of-range values
 *		fall back to BFA_BB_SCN_DEF
 *
 * Each guard returns a distinct status so callers can report the
 * precise reason the request was rejected.
 */
bfa_status_t
bfa_fcport_cfg_bbcr(struct bfa_s *bfa, bfa_boolean_t on_off, u8 bb_scn)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, on_off);

	/* BBCR exists on FC ports only */
	if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC)
		return BFA_STATUS_BBCR_FC_ONLY;

	/* Mezzanine cards (other than Chinook) don't support it */
	if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type) &&
		(bfa->ioc.attr->card_type != BFA_MFG_TYPE_CHINOOK))
		return BFA_STATUS_CMD_NOTSUPP_MEZZ;

	if (on_off) {
		/* BBCR conflicts with loop, QoS and trunking */
		if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP)
			return BFA_STATUS_TOPOLOGY_LOOP;

		if (fcport->cfg.qos_enabled)
			return BFA_STATUS_ERROR_QOS_ENABLED;

		if (fcport->cfg.trunked)
			return BFA_STATUS_TRUNK_ENABLED;

		/* A fixed speed below the supported maximum is rejected */
		if ((fcport->cfg.speed != BFA_PORT_SPEED_AUTO) &&
			(fcport->cfg.speed < bfa_ioc_speed_sup(&bfa->ioc)))
			return BFA_STATUS_ERR_BBCR_SPEED_UNSUPPORT;

		/* Requires an 8G-or-faster adapter */
		if (bfa_ioc_speed_sup(&bfa->ioc) < BFA_PORT_SPEED_8GBPS)
			return BFA_STATUS_FEATURE_NOT_SUPPORTED;

		/* Already enabled: report whether anything would change */
		if (fcport->cfg.bb_cr_enabled) {
			if (bb_scn != fcport->cfg.bb_scn)
				return BFA_STATUS_BBCR_CFG_NO_CHANGE;
			else
				return BFA_STATUS_NO_CHANGE;
		}

		/* Out-of-range SCN counts fall back to the default */
		if ((bb_scn == 0) || (bb_scn > BFA_BB_SCN_MAX))
			bb_scn = BFA_BB_SCN_DEF;

		fcport->cfg.bb_cr_enabled = on_off;
		fcport->cfg.bb_scn = bb_scn;
	} else {
		if (!fcport->cfg.bb_cr_enabled)
			return BFA_STATUS_NO_CHANGE;

		fcport->cfg.bb_cr_enabled = on_off;
		fcport->cfg.bb_scn = 0;
	}

	return BFA_STATUS_OK;
}
4257 
4258 bfa_status_t
4259 bfa_fcport_get_bbcr_attr(struct bfa_s *bfa,
4260 		struct bfa_bbcr_attr_s *bbcr_attr)
4261 {
4262 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4263 
4264 	if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC)
4265 		return BFA_STATUS_BBCR_FC_ONLY;
4266 
4267 	if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP)
4268 		return BFA_STATUS_TOPOLOGY_LOOP;
4269 
4270 	*bbcr_attr = fcport->bbcr_attr;
4271 
4272 	return BFA_STATUS_OK;
4273 }
4274 
/*
 * Enter diagnostic (D-port) mode and mark it in the port module.
 *
 * Assume caller check for port is in disable state
 */
void
bfa_fcport_dportenable(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTENABLE);
	bfa_port_set_dportenabled(&bfa->modules.port, BFA_TRUE);
}
4284 
/*
 * Leave diagnostic (D-port) mode and clear the mark in the port module.
 *
 * Assume caller check for port is in disable state
 */
void
bfa_fcport_dportdisable(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTDISABLE);
	bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE);
}
4294 
/*
 * Enter dynamic diagnostic (DD-port) mode.
 *
 * Assume caller check for port is in disable state
 */
void
bfa_fcport_ddportenable(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTENABLE);
}
4303 
/*
 * Leave dynamic diagnostic (DD-port) mode.
 *
 * Assume caller check for port is in disable state
 */
void
bfa_fcport_ddportdisable(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTDISABLE);
}
4312 
4313 /*
4314  * Rport State machine functions
4315  */
4316 /*
4317  * Beginning state, only online event expected.
4318  */
4319 static void
4320 bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
4321 {
4322 	bfa_trc(rp->bfa, rp->rport_tag);
4323 	bfa_trc(rp->bfa, event);
4324 
4325 	switch (event) {
4326 	case BFA_RPORT_SM_CREATE:
4327 		bfa_stats(rp, sm_un_cr);
4328 		bfa_sm_set_state(rp, bfa_rport_sm_created);
4329 		break;
4330 
4331 	default:
4332 		bfa_stats(rp, sm_un_unexp);
4333 		bfa_sm_fault(rp->bfa, event);
4334 	}
4335 }
4336 
/*
 * Created state: the rport exists but firmware has not been told yet.
 *
 * ONLINE issues (or queues) the firmware create request, DELETE frees
 * the rport, HWFAIL parks it until the IOC recovers.
 */
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_cr_on);
		/* Queue-full moves us to the qfull variant to retry later */
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_cr_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_cr_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_cr_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4368 
4369 /*
4370  * Waiting for rport create response from firmware.
4371  */
static void
bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* Firmware acknowledged the create: rport is now online. */
		bfa_stats(rp, sm_fwc_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_online);
		bfa_rport_online_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* Cannot delete while the create is outstanding; remember the
		 * delete and act on it when the firmware response arrives. */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_OFFLINE:
		/* Same as above: park the offline until the create response. */
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4405 
4406 /*
4407  * Request queue is full, awaiting queue resume to send create request.
4408  */
static void
bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* Queue space available again: retry the firmware create. */
		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		bfa_rport_send_fwcreate(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* Create was never sent, so cancel the queue wait and free
		 * the rport directly. */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4446 
4447 /*
4448  * Online state - normal parking state.
4449  */
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	struct bfi_rport_qos_scn_s *qos_scn;

	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_on_off);
		/* Tear down the firmware rport; wait in qfull if the request
		 * queue has no space for the delete message. */
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_on_del);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_on_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_SET_SPEED:
		bfa_rport_send_fwspeed(rp);
		break;

	case BFA_RPORT_SM_QOS_SCN:
		/* QoS state change from firmware: update cached attributes
		 * and notify consumers of flow-id / priority changes. */
		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
		rp->qos_attr = qos_scn->new_qos_attr;
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);

		/* Flow ids arrive in big-endian wire order; convert in place
		 * before comparing. */
		qos_scn->old_qos_attr.qos_flow_id  =
			be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
		qos_scn->new_qos_attr.qos_flow_id  =
			be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);

		if (qos_scn->old_qos_attr.qos_flow_id !=
			qos_scn->new_qos_attr.qos_flow_id)
			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
						    qos_scn->old_qos_attr,
						    qos_scn->new_qos_attr);
		if (qos_scn->old_qos_attr.qos_priority !=
			qos_scn->new_qos_attr.qos_priority)
			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
						  qos_scn->old_qos_attr,
						  qos_scn->new_qos_attr);
		break;

	default:
		bfa_stats(rp, sm_on_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4514 
4515 /*
4516  * Firmware rport is being deleted - awaiting f/w response.
4517  */
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* Firmware confirmed the delete; rport is now offline. */
		bfa_stats(rp, sm_fwd_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* Upgrade the pending offline into a full delete; reuse the
		 * outstanding firmware delete request. */
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4547 
/*
 * Request queue was full when the firmware delete was attempted;
 * waiting for queue resume to send it.
 */
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4577 
4578 /*
4579  * Offline state.
4580  */
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_DELETE:
		/* No firmware resource while offline; free directly. */
		bfa_stats(rp, sm_off_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_off_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_off_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_OFFLINE:
		/* Already offline; just replay the callback to the consumer. */
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_off_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4616 
4617 /*
4618  * Rport is deleted, waiting for firmware response to delete.
4619  */
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* Firmware delete completed; release the rport. */
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC failure implies the firmware resource is gone too. */
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4643 
/*
 * Rport delete requested while the request queue is full; waiting for
 * queue resume to send the firmware delete.
 */
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4668 
4669 /*
4670  * Waiting for rport create response from firmware. A delete is pending.
4671  */
static void
bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
				enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* Create finished; immediately issue the deferred delete. */
		bfa_stats(rp, sm_delp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_delp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_stats(rp, sm_delp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4699 
4700 /*
4701  * Waiting for rport create response from firmware. Rport offline is pending.
4702  */
static void
bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
				 enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* Create finished; issue the deferred firmware delete. */
		bfa_stats(rp, sm_offp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		/* Upgrade the pending offline into a pending delete. */
		bfa_stats(rp, sm_offp_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_offp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_offp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4735 
4736 /*
4737  * IOC h/w failed.
4738  */
static void
bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		/* Stay in this state; just acknowledge to the consumer. */
		bfa_stats(rp, sm_iocd_off);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_iocd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		/* IOC recovered; re-create the rport in firmware. */
		bfa_stats(rp, sm_iocd_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* Already in failed state; nothing more to do. */
		break;

	default:
		bfa_stats(rp, sm_iocd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4773 
4774 
4775 
4776 /*
4777  *  bfa_rport_private BFA rport private functions
4778  */
4779 
4780 static void
4781 __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4782 {
4783 	struct bfa_rport_s *rp = cbarg;
4784 
4785 	if (complete)
4786 		bfa_cb_rport_online(rp->rport_drv);
4787 }
4788 
4789 static void
4790 __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4791 {
4792 	struct bfa_rport_s *rp = cbarg;
4793 
4794 	if (complete)
4795 		bfa_cb_rport_offline(rp->rport_drv);
4796 }
4797 
4798 static void
4799 bfa_rport_qresume(void *cbarg)
4800 {
4801 	struct bfa_rport_s	*rp = cbarg;
4802 
4803 	bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
4804 }
4805 
/*
 * Compute kva memory required by the rport module: one bfa_rport_s per
 * configured rport, after clamping the count to the supported minimum.
 */
void
bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);

	if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
		cfg->fwcfg.num_rports = BFA_RPORT_MIN;

	/* kva memory */
	bfa_mem_kva_setup(minfo, rport_kva,
		cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
}
4819 
/*
 * Rport module attach: carve the rport array out of the pre-claimed kva
 * region, initialize each rport and place all but rport 0 on the free
 * list.
 */
void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rp;
	u16 i;

	INIT_LIST_HEAD(&mod->rp_free_q);
	INIT_LIST_HEAD(&mod->rp_active_q);
	INIT_LIST_HEAD(&mod->rp_unused_q);

	rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
	mod->rps_list = rp;
	mod->num_rports = cfg->fwcfg.num_rports;

	/* num_rports must be a non-zero power of two. */
	WARN_ON(!mod->num_rports ||
		   (mod->num_rports & (mod->num_rports - 1)));

	for (i = 0; i < mod->num_rports; i++, rp++) {
		memset(rp, 0, sizeof(struct bfa_rport_s));
		rp->bfa = bfa;
		rp->rport_tag = i;
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);

		/*
		 *  - is unused: rport tag 0 is reserved and never handed out,
		 *  so it is kept off the free list.
		 */
		if (i)
			list_add_tail(&rp->qe, &mod->rp_free_q);

		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
	}

	/*
	 * consume memory
	 */
	bfa_mem_kva_curp(mod) = (u8 *) rp;
}
4859 
4860 void
4861 bfa_rport_iocdisable(struct bfa_s *bfa)
4862 {
4863 	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4864 	struct bfa_rport_s *rport;
4865 	struct list_head *qe, *qen;
4866 
4867 	/* Enqueue unused rport resources to free_q */
4868 	list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);
4869 
4870 	list_for_each_safe(qe, qen, &mod->rp_active_q) {
4871 		rport = (struct bfa_rport_s *) qe;
4872 		bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
4873 	}
4874 }
4875 
4876 static struct bfa_rport_s *
4877 bfa_rport_alloc(struct bfa_rport_mod_s *mod)
4878 {
4879 	struct bfa_rport_s *rport;
4880 
4881 	bfa_q_deq(&mod->rp_free_q, &rport);
4882 	if (rport)
4883 		list_add_tail(&rport->qe, &mod->rp_active_q);
4884 
4885 	return rport;
4886 }
4887 
/*
 * Return an rport to the free list; it must currently be active.
 */
static void
bfa_rport_free(struct bfa_rport_s *rport)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);

	WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
	list_del(&rport->qe);
	list_add_tail(&rport->qe, &mod->rp_free_q);
}
4897 
/*
 * Build and post a firmware rport-create request. Returns BFA_FALSE
 * (and registers a queue-resume waiter) if the request queue is full.
 */
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
	struct bfi_rport_create_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
			bfa_fn_lpu(rp->bfa));
	m->bfa_handle = rp->rport_tag;
	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
	m->pid = rp->rport_info.pid;
	m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
	m->local_pid = rp->rport_info.local_pid;
	m->fc_class = rp->rport_info.fc_class;
	m->vf_en = rp->rport_info.vf_en;
	m->vf_id = rp->rport_info.vf_id;
	m->cisc = rp->rport_info.cisc;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
4930 
/*
 * Build and post a firmware rport-delete request. Returns BFA_FALSE
 * (and registers a queue-resume waiter) if the request queue is full.
 */
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
	struct bfi_rport_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
			bfa_fn_lpu(rp->bfa));
	m->fw_handle = rp->fw_handle;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
4955 
/*
 * Post a set-speed request to firmware. Unlike create/delete, a full
 * request queue is not retried here - the request is simply dropped
 * (traced) and BFA_FALSE returned.
 */
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
	struct bfa_rport_speed_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_trc(rp->bfa, rp->rport_info.speed);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
			bfa_fn_lpu(rp->bfa));
	m->fw_handle = rp->fw_handle;
	m->speed = (u8)rp->rport_info.speed;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
4981 
4982 
4983 
4984 /*
4985  *  bfa_rport_public
4986  */
4987 
4988 /*
4989  * Rport interrupt processing.
4990  */
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_rport_i2h_msg_u msg;
	struct bfa_rport_s *rp;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_RPORT_I2H_CREATE_RSP:
		/* Create completed: latch the firmware handle and QoS
		 * attributes before driving the state machine. */
		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
		rp->fw_handle = msg.create_rsp->fw_handle;
		rp->qos_attr = msg.create_rsp->qos_attr;
		bfa_rport_set_lunmask(bfa, rp);
		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_DELETE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
		bfa_rport_unset_lunmask(bfa, rp);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_QOS_SCN:
		/* Stash the raw firmware message; the online state handler
		 * decodes it when processing the QOS_SCN event. */
		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
		rp->event_arg.fw_msg = msg.qos_scn_evt;
		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
		break;

	case BFI_RPORT_I2H_LIP_SCN_ONLINE:
		bfa_fcport_update_loop_info(BFA_FCPORT_MOD(bfa),
				&msg.lip_scn->loop_info);
		bfa_cb_rport_scn_online(bfa);
		break;

	case BFI_RPORT_I2H_LIP_SCN_OFFLINE:
		bfa_cb_rport_scn_offline(bfa);
		break;

	case BFI_RPORT_I2H_NO_DEV:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.lip_scn->bfa_handle);
		bfa_cb_rport_scn_no_dev(rp->rport_drv);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}
5044 
5045 void
5046 bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
5047 {
5048 	struct bfa_rport_mod_s	*mod = BFA_RPORT_MOD(bfa);
5049 	struct list_head	*qe;
5050 	int	i;
5051 
5052 	for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
5053 		bfa_q_deq_tail(&mod->rp_free_q, &qe);
5054 		list_add_tail(qe, &mod->rp_unused_q);
5055 	}
5056 }
5057 
5058 /*
5059  *  bfa_rport_api
5060  */
5061 
5062 struct bfa_rport_s *
5063 bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
5064 {
5065 	struct bfa_rport_s *rp;
5066 
5067 	rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
5068 
5069 	if (rp == NULL)
5070 		return NULL;
5071 
5072 	rp->bfa = bfa;
5073 	rp->rport_drv = rport_drv;
5074 	memset(&rp->stats, 0, sizeof(rp->stats));
5075 
5076 	WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
5077 	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
5078 
5079 	return rp;
5080 }
5081 
/*
 * Bring an rport online with the given login parameters; triggers the
 * firmware create through the state machine.
 */
void
bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
{
	/* A zero PDU size is a peer bug; warn, then repair it below. */
	WARN_ON(rport_info->max_frmsz == 0);

	/*
	 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
	 * responses. Default to minimum size.
	 */
	if (rport_info->max_frmsz == 0) {
		bfa_trc(rport->bfa, rport->rport_tag);
		rport_info->max_frmsz = FC_MIN_PDUSZ;
	}

	rport->rport_info = *rport_info;
	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
}
5099 
5100 void
5101 bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
5102 {
5103 	WARN_ON(speed == 0);
5104 	WARN_ON(speed == BFA_PORT_SPEED_AUTO);
5105 
5106 	if (rport) {
5107 		rport->rport_info.speed = speed;
5108 		bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
5109 	}
5110 }
5111 
5112 /* Set Rport LUN Mask */
void
bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
{
	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
	wwn_t	lp_wwn, rp_wwn;
	u8 lp_tag = (u8)rp->rport_info.lp_tag;

	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;

	/* Enable LUN masking on both the logical port and the rport. */
	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
					rp->lun_mask = BFA_TRUE;
	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
}
5127 
5128 /* Unset Rport LUN mask */
void
bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
{
	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
	wwn_t	lp_wwn, rp_wwn;

	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;

	/* Disable LUN masking and invalidate the rport/lport tags in the
	 * FCP IM lun-mask table. */
	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
				rp->lun_mask = BFA_FALSE;
	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
			BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
}
5143 
5144 /*
5145  * SGPG related functions
5146  */
5147 
5148 /*
5149  * Compute and return memory needed by FCP(im) module.
5150  */
void
bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa);
	struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_sgpg, num_sgpg;
	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);

	/* Clamp the configured SG page count to the supported range. */
	if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
		cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX)
		cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX;

	num_sgpg = cfg->drvcfg.num_sgpgs;

	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
	per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz);

	/* Spread the SG pages over DMA segments; the last segment takes
	 * whatever remains. */
	bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
		if (num_sgpg >= per_seg_sgpg) {
			num_sgpg -= per_seg_sgpg;
			bfa_mem_dma_setup(minfo, seg_ptr,
					per_seg_sgpg * sgpg_sz);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
					num_sgpg * sgpg_sz);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, sgpg_kva,
		cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
}
5185 
/*
 * SGPG module attach: claim the DMA segments for the firmware SG pages
 * and the kva array of host-side descriptors, pairing each descriptor
 * with an aligned DMA page and queueing it on the free list.
 */
void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_s *hsgpg;
	struct bfi_sgpg_s *sgpg;
	u64 align_len;
	struct bfa_mem_dma_s *seg_ptr;
	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);
	u16	i, idx, nsegs, per_seg_sgpg, num_sgpg;

	/* Lets the same memory be viewed as a raw physical address or as
	 * the firmware address format. */
	union {
		u64 pa;
		union bfi_addr_u addr;
	} sgpg_pa, sgpg_pa_tmp;

	INIT_LIST_HEAD(&mod->sgpg_q);
	INIT_LIST_HEAD(&mod->sgpg_wait_q);

	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);

	mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs;

	num_sgpg = cfg->drvcfg.num_sgpgs;
	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);

	/* dma/kva mem claim */
	hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod);

	bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {

		if (!bfa_mem_dma_virt(seg_ptr))
			break;

		/* Round the segment start up to an SG-page boundary. */
		align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) -
					     bfa_mem_dma_phys(seg_ptr);

		sgpg = (struct bfi_sgpg_s *)
			(((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len);
		sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len;
		WARN_ON(sgpg_pa.pa & (sgpg_sz - 1));

		per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz;

		for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) {
			memset(hsgpg, 0, sizeof(*hsgpg));
			memset(sgpg, 0, sizeof(*sgpg));

			hsgpg->sgpg = sgpg;
			/* Store the DMA address in firmware byte order. */
			sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
			hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
			list_add_tail(&hsgpg->qe, &mod->sgpg_q);

			sgpg++;
			hsgpg++;
			sgpg_pa.pa += sgpg_sz;
		}
	}

	bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
}
5248 
5249 bfa_status_t
5250 bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
5251 {
5252 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5253 	struct bfa_sgpg_s *hsgpg;
5254 	int i;
5255 
5256 	if (mod->free_sgpgs < nsgpgs)
5257 		return BFA_STATUS_ENOMEM;
5258 
5259 	for (i = 0; i < nsgpgs; i++) {
5260 		bfa_q_deq(&mod->sgpg_q, &hsgpg);
5261 		WARN_ON(!hsgpg);
5262 		list_add_tail(&hsgpg->qe, sgpg_q);
5263 	}
5264 
5265 	mod->free_sgpgs -= nsgpgs;
5266 	return BFA_STATUS_OK;
5267 }
5268 
/*
 * Return nsgpg SG pages to the free pool, then satisfy as many queued
 * waiters as possible (possibly partially) with the reclaimed pages.
 */
void
bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_wqe_s *wqe;

	mod->free_sgpgs += nsgpg;
	WARN_ON(mod->free_sgpgs > mod->num_sgpgs);

	list_splice_tail_init(sgpg_q, &mod->sgpg_q);

	if (list_empty(&mod->sgpg_wait_q))
		return;

	/*
	 * satisfy as many waiting requests as possible
	 */
	do {
		wqe = bfa_q_first(&mod->sgpg_wait_q);
		/* Give the waiter everything available, capped at its need;
		 * a partially satisfied waiter stays on the wait queue. */
		if (mod->free_sgpgs < wqe->nsgpg)
			nsgpg = mod->free_sgpgs;
		else
			nsgpg = wqe->nsgpg;
		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
		wqe->nsgpg -= nsgpg;
		if (wqe->nsgpg == 0) {
			/* Fully satisfied: dequeue and notify the waiter. */
			list_del(&wqe->qe);
			wqe->cbfn(wqe->cbarg);
		}
	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
}
5300 
/*
 * Queue a wait element for nsgpg SG pages. Should only be called after
 * bfa_sgpg_malloc() failed, i.e. when fewer than nsgpg pages are free;
 * any pages that are free are handed to this waiter immediately.
 */
void
bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	WARN_ON(nsgpg <= 0);
	/* Caller should have allocated directly if enough pages were free. */
	WARN_ON(nsgpg <= mod->free_sgpgs);

	wqe->nsgpg_total = wqe->nsgpg = nsgpg;

	/*
	 * allocate any left to this one first
	 */
	if (mod->free_sgpgs) {
		/*
		 * no one else is waiting for SGPG
		 */
		WARN_ON(!list_empty(&mod->sgpg_wait_q));
		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
		wqe->nsgpg -= mod->free_sgpgs;
		mod->free_sgpgs = 0;
	}

	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
}
5326 
5327 void
5328 bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
5329 {
5330 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5331 
5332 	WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
5333 	list_del(&wqe->qe);
5334 
5335 	if (wqe->nsgpg_total != wqe->nsgpg)
5336 		bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
5337 				   wqe->nsgpg_total - wqe->nsgpg);
5338 }
5339 
5340 void
5341 bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
5342 		   void *cbarg)
5343 {
5344 	INIT_LIST_HEAD(&wqe->sgpg_q);
5345 	wqe->cbfn = cbfn;
5346 	wqe->cbarg = cbarg;
5347 }
5348 
5349 /*
5350  *  UF related functions
5351  */
5352 /*
5353  *****************************************************************************
5354  * Internal functions
5355  *****************************************************************************
5356  */
5357 static void
5358 __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
5359 {
5360 	struct bfa_uf_s   *uf = cbarg;
5361 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
5362 
5363 	if (complete)
5364 		ufm->ufrecv(ufm->cbarg, uf);
5365 }
5366 
5367 static void
5368 claim_uf_post_msgs(struct bfa_uf_mod_s *ufm)
5369 {
5370 	struct bfi_uf_buf_post_s *uf_bp_msg;
5371 	u16 i;
5372 	u16 buf_len;
5373 
5374 	ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm);
5375 	uf_bp_msg = ufm->uf_buf_posts;
5376 
5377 	for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
5378 	     i++, uf_bp_msg++) {
5379 		memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
5380 
5381 		uf_bp_msg->buf_tag = i;
5382 		buf_len = sizeof(struct bfa_uf_buf_s);
5383 		uf_bp_msg->buf_len = cpu_to_be16(buf_len);
5384 		bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
5385 			    bfa_fn_lpu(ufm->bfa));
5386 		bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i));
5387 	}
5388 
5389 	/*
5390 	 * advance pointer beyond consumed memory
5391 	 */
5392 	bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg;
5393 }
5394 
static void
claim_ufs(struct bfa_uf_mod_s *ufm)
{
	u16 i;
	struct bfa_uf_s   *uf;

	/*
	 * Claim block of memory for UF list
	 */
	ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm);

	/*
	 * Initialize UFs and queue it in UF free queue
	 */
	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
		memset(uf, 0, sizeof(struct bfa_uf_s));
		uf->bfa = ufm->bfa;
		uf->uf_tag = i;
		/* Each UF owns one fixed-size DMA buffer slot. */
		uf->pb_len = BFA_PER_UF_DMA_SZ;
		uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
		uf->buf_pa = ufm_pbs_pa(ufm, i);
		list_add_tail(&uf->qe, &ufm->uf_free_q);
	}

	/*
	 * advance memory pointer
	 */
	bfa_mem_kva_curp(ufm) = (u8 *) uf;
}
5424 
/*
 * Claim kva for the UF module. Order matters: the UF descriptor array
 * is carved first, then the prebuilt buffer-post messages, from the
 * same running kva pointer.
 */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm)
{
	claim_ufs(ufm);
	claim_uf_post_msgs(ufm);
}
5431 
/*
 * Compute DMA and kva memory required by the UF module: one DMA buffer
 * per UF plus a descriptor and a prebuilt post message per UF in kva.
 */
void
bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
	u32	num_ufs = cfg->fwcfg.num_uf_bufs;
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_uf = 0;

	nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
	per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);

	/* Spread the UF buffers over DMA segments; the last segment takes
	 * whatever remains. */
	bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
		if (num_ufs >= per_seg_uf) {
			num_ufs -= per_seg_uf;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_uf * BFA_PER_UF_DMA_SZ);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_ufs * BFA_PER_UF_DMA_SZ);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
		(sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
}
5459 
5460 void
5461 bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5462 		struct bfa_pcidev_s *pcidev)
5463 {
5464 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5465 
5466 	ufm->bfa = bfa;
5467 	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
5468 	INIT_LIST_HEAD(&ufm->uf_free_q);
5469 	INIT_LIST_HEAD(&ufm->uf_posted_q);
5470 	INIT_LIST_HEAD(&ufm->uf_unused_q);
5471 
5472 	uf_mem_claim(ufm);
5473 }
5474 
5475 static struct bfa_uf_s *
5476 bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
5477 {
5478 	struct bfa_uf_s   *uf;
5479 
5480 	bfa_q_deq(&uf_mod->uf_free_q, &uf);
5481 	return uf;
5482 }
5483 
/* Return a UF to the tail of the free list. */
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}
5489 
5490 static bfa_status_t
5491 bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
5492 {
5493 	struct bfi_uf_buf_post_s *uf_post_msg;
5494 
5495 	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
5496 	if (!uf_post_msg)
5497 		return BFA_STATUS_FAILED;
5498 
5499 	memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
5500 		      sizeof(struct bfi_uf_buf_post_s));
5501 	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);
5502 
5503 	bfa_trc(ufm->bfa, uf->uf_tag);
5504 
5505 	list_add_tail(&uf->qe, &ufm->uf_posted_q);
5506 	return BFA_STATUS_OK;
5507 }
5508 
5509 static void
5510 bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
5511 {
5512 	struct bfa_uf_s   *uf;
5513 
5514 	while ((uf = bfa_uf_get(uf_mod)) != NULL) {
5515 		if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
5516 			break;
5517 	}
5518 }
5519 
5520 static void
5521 uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
5522 {
5523 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5524 	u16 uf_tag = m->buf_tag;
5525 	struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
5526 	struct bfa_uf_buf_s *uf_buf;
5527 	uint8_t *buf;
5528 	struct fchs_s *fchs;
5529 
5530 	uf_buf = (struct bfa_uf_buf_s *)
5531 			bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
5532 	buf = &uf_buf->d[0];
5533 
5534 	m->frm_len = be16_to_cpu(m->frm_len);
5535 	m->xfr_len = be16_to_cpu(m->xfr_len);
5536 
5537 	fchs = (struct fchs_s *)uf_buf;
5538 
5539 	list_del(&uf->qe);	/* dequeue from posted queue */
5540 
5541 	uf->data_ptr = buf;
5542 	uf->data_len = m->xfr_len;
5543 
5544 	WARN_ON(uf->data_len < sizeof(struct fchs_s));
5545 
5546 	if (uf->data_len == sizeof(struct fchs_s)) {
5547 		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
5548 			       uf->data_len, (struct fchs_s *)buf);
5549 	} else {
5550 		u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
5551 		bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
5552 				      BFA_PL_EID_RX, uf->data_len,
5553 				      (struct fchs_s *)buf, pld_w0);
5554 	}
5555 
5556 	if (bfa->fcs)
5557 		__bfa_cb_uf_recv(uf, BFA_TRUE);
5558 	else
5559 		bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
5560 }
5561 
5562 void
5563 bfa_uf_iocdisable(struct bfa_s *bfa)
5564 {
5565 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5566 	struct bfa_uf_s *uf;
5567 	struct list_head *qe, *qen;
5568 
5569 	/* Enqueue unused uf resources to free_q */
5570 	list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q);
5571 
5572 	list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
5573 		uf = (struct bfa_uf_s *) qe;
5574 		list_del(&uf->qe);
5575 		bfa_uf_put(ufm, uf);
5576 	}
5577 }
5578 
/*
 * Module start: hand all free UF buffers to firmware.
 */
void
bfa_uf_start(struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	bfa_uf_post_all(ufm);
}
5584 
5585 /*
 * Register handler for all unsolicited receive frames.
5587  *
5588  * @param[in]	bfa		BFA instance
5589  * @param[in]	ufrecv	receive handler function
5590  * @param[in]	cbarg	receive handler arg
5591  */
5592 void
5593 bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
5594 {
5595 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5596 
5597 	ufm->ufrecv = ufrecv;
5598 	ufm->cbarg = cbarg;
5599 }
5600 
5601 /*
5602  *	Free an unsolicited frame back to BFA.
5603  *
5604  * @param[in]		uf		unsolicited frame to be freed
5605  *
5606  * @return None
5607  */
5608 void
5609 bfa_uf_free(struct bfa_uf_s *uf)
5610 {
5611 	bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
5612 	bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
5613 }
5614 
5615 
5616 
5617 /*
5618  *  uf_pub BFA uf module public functions
5619  */
5620 void
5621 bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5622 {
5623 	bfa_trc(bfa, msg->mhdr.msg_id);
5624 
5625 	switch (msg->mhdr.msg_id) {
5626 	case BFI_UF_I2H_FRM_RCVD:
5627 		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
5628 		break;
5629 
5630 	default:
5631 		bfa_trc(bfa, msg->mhdr.msg_id);
5632 		WARN_ON(1);
5633 	}
5634 }
5635 
5636 void
5637 bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
5638 {
5639 	struct bfa_uf_mod_s	*mod = BFA_UF_MOD(bfa);
5640 	struct list_head	*qe;
5641 	int	i;
5642 
5643 	for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
5644 		bfa_q_deq_tail(&mod->uf_free_q, &qe);
5645 		list_add_tail(qe, &mod->uf_unused_q);
5646 	}
5647 }
5648 
5649 /*
5650  *	Dport forward declaration
5651  */
5652 
/* D-port test states as reported through dport->test_state */
enum bfa_dport_test_state_e {
	BFA_DPORT_ST_DISABLED	= 0,	/*!< dport is disabled */
	BFA_DPORT_ST_INP	= 1,	/*!< test in progress */
	BFA_DPORT_ST_COMP	= 2,	/*!< test complete successfully */
	BFA_DPORT_ST_NO_SFP	= 3,	/*!< sfp is not present */
	BFA_DPORT_ST_NOTSTART	= 4,	/*!< dport enabled but test not started */
};
5660 
5661 /*
5662  * BFA DPORT state machine events
5663  */
enum bfa_dport_sm_event {
	BFA_DPORT_SM_ENABLE	= 1,	/* dport enable event         */
	BFA_DPORT_SM_DISABLE    = 2,    /* dport disable event        */
	BFA_DPORT_SM_FWRSP      = 3,    /* fw enable/disable rsp      */
	BFA_DPORT_SM_QRESUME    = 4,    /* CQ space available         */
	BFA_DPORT_SM_HWFAIL     = 5,    /* IOC h/w failure            */
	BFA_DPORT_SM_START	= 6,	/* re-start dport test        */
	BFA_DPORT_SM_REQFAIL	= 7,	/* request failure            */
	BFA_DPORT_SM_SCN	= 8,	/* state change notify frm fw */
};
5674 
/*
 * Forward declarations for the d-port state-machine handlers and the
 * firmware request/response helpers defined later in this file.
 */
static void bfa_dport_sm_disabled(struct bfa_dport_s *dport,
				  enum bfa_dport_sm_event event);
static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
				  enum bfa_dport_sm_event event);
static void bfa_dport_sm_enabling(struct bfa_dport_s *dport,
				  enum bfa_dport_sm_event event);
static void bfa_dport_sm_enabled(struct bfa_dport_s *dport,
				 enum bfa_dport_sm_event event);
static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
				 enum bfa_dport_sm_event event);
static void bfa_dport_sm_disabling(struct bfa_dport_s *dport,
				   enum bfa_dport_sm_event event);
static void bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport,
					enum bfa_dport_sm_event event);
static void bfa_dport_sm_starting(struct bfa_dport_s *dport,
				  enum bfa_dport_sm_event event);
static void bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport,
				   enum bfa_dport_sm_event event);
static void bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport,
				   enum bfa_dport_sm_event event);
static void bfa_dport_qresume(void *cbarg);
static void bfa_dport_req_comp(struct bfa_dport_s *dport,
				struct bfi_diag_dport_rsp_s *msg);
static void bfa_dport_scn(struct bfa_dport_s *dport,
				struct bfi_diag_dport_scn_s *msg);
5700 
5701 /*
5702  *	BFA fcdiag module
5703  */
5704 #define BFA_DIAG_QTEST_TOV	1000    /* msec */
5705 
5706 /*
5707  *	Set port status to busy
5708  */
5709 static void
5710 bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
5711 {
5712 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa);
5713 
5714 	if (fcdiag->lb.lock)
5715 		fcport->diag_busy = BFA_TRUE;
5716 	else
5717 		fcport->diag_busy = BFA_FALSE;
5718 }
5719 
5720 void
5721 bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5722 		struct bfa_pcidev_s *pcidev)
5723 {
5724 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5725 	struct bfa_dport_s  *dport = &fcdiag->dport;
5726 
5727 	fcdiag->bfa             = bfa;
5728 	fcdiag->trcmod  = bfa->trcmod;
5729 	/* The common DIAG attach bfa_diag_attach() will do all memory claim */
5730 	dport->bfa = bfa;
5731 	bfa_sm_set_state(dport, bfa_dport_sm_disabled);
5732 	bfa_reqq_winit(&dport->reqq_wait, bfa_dport_qresume, dport);
5733 	dport->cbfn = NULL;
5734 	dport->cbarg = NULL;
5735 	dport->test_state = BFA_DPORT_ST_DISABLED;
5736 	memset(&dport->result, 0, sizeof(struct bfa_diag_dport_result_s));
5737 }
5738 
5739 void
5740 bfa_fcdiag_iocdisable(struct bfa_s *bfa)
5741 {
5742 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5743 	struct bfa_dport_s *dport = &fcdiag->dport;
5744 
5745 	bfa_trc(fcdiag, fcdiag->lb.lock);
5746 	if (fcdiag->lb.lock) {
5747 		fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
5748 		fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
5749 		fcdiag->lb.lock = 0;
5750 		bfa_fcdiag_set_busy_status(fcdiag);
5751 	}
5752 
5753 	bfa_sm_send_event(dport, BFA_DPORT_SM_HWFAIL);
5754 }
5755 
5756 static void
5757 bfa_fcdiag_queuetest_timeout(void *cbarg)
5758 {
5759 	struct bfa_fcdiag_s       *fcdiag = cbarg;
5760 	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
5761 
5762 	bfa_trc(fcdiag, fcdiag->qtest.all);
5763 	bfa_trc(fcdiag, fcdiag->qtest.count);
5764 
5765 	fcdiag->qtest.timer_active = 0;
5766 
5767 	res->status = BFA_STATUS_ETIMER;
5768 	res->count  = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
5769 	if (fcdiag->qtest.all)
5770 		res->queue  = fcdiag->qtest.all;
5771 
5772 	bfa_trc(fcdiag, BFA_STATUS_ETIMER);
5773 	fcdiag->qtest.status = BFA_STATUS_ETIMER;
5774 	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
5775 	fcdiag->qtest.lock = 0;
5776 }
5777 
5778 static bfa_status_t
5779 bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag)
5780 {
5781 	u32	i;
5782 	struct bfi_diag_qtest_req_s *req;
5783 
5784 	req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
5785 	if (!req)
5786 		return BFA_STATUS_DEVBUSY;
5787 
5788 	/* build host command */
5789 	bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
5790 		bfa_fn_lpu(fcdiag->bfa));
5791 
5792 	for (i = 0; i < BFI_LMSG_PL_WSZ; i++)
5793 		req->data[i] = QTEST_PAT_DEFAULT;
5794 
5795 	bfa_trc(fcdiag, fcdiag->qtest.queue);
5796 	/* ring door bell */
5797 	bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
5798 	return BFA_STATUS_OK;
5799 }
5800 
/*
 * Queue-test response handler.
 *
 * Verifies the payload echoed by firmware is the bitwise complement of
 * the transmitted pattern, then either sends the next iteration (or
 * moves on to the next queue when testing all queues) or finalizes the
 * result, stops the watchdog, and completes the request.
 */
static void
bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag,
			bfi_diag_qtest_rsp_t *rsp)
{
	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
	bfa_status_t status = BFA_STATUS_OK;
	int i;

	/* Check timer, should still be active   */
	if (!fcdiag->qtest.timer_active) {
		/* response raced with the timeout, which already completed */
		bfa_trc(fcdiag, fcdiag->qtest.timer_active);
		return;
	}

	/* update count */
	fcdiag->qtest.count--;

	/* Check result */
	for (i = 0; i < BFI_LMSG_PL_WSZ; i++) {
		if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) {
			res->status = BFA_STATUS_DATACORRUPTED;
			break;
		}
	}

	if (res->status == BFA_STATUS_OK) {
		if (fcdiag->qtest.count > 0) {
			/* more iterations remain on the current queue */
			status = bfa_fcdiag_queuetest_send(fcdiag);
			if (status == BFA_STATUS_OK)
				return;
			else
				res->status = status;
		} else if (fcdiag->qtest.all > 0 &&
			fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
			/* current queue done; advance to the next queue */
			fcdiag->qtest.count = QTEST_CNT_DEFAULT;
			fcdiag->qtest.queue++;
			status = bfa_fcdiag_queuetest_send(fcdiag);
			if (status == BFA_STATUS_OK)
				return;
			else
				res->status = status;
		}
	}

	/* Stop timer when we comp all queue */
	if (fcdiag->qtest.timer_active) {
		bfa_timer_stop(&fcdiag->qtest.timer);
		fcdiag->qtest.timer_active = 0;
	}
	res->queue = fcdiag->qtest.queue;
	res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
	bfa_trc(fcdiag, res->count);
	bfa_trc(fcdiag, res->status);
	fcdiag->qtest.status = res->status;
	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
	fcdiag->qtest.lock = 0;
}
5858 
5859 static void
5860 bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag,
5861 			struct bfi_diag_lb_rsp_s *rsp)
5862 {
5863 	struct bfa_diag_loopback_result_s *res = fcdiag->lb.result;
5864 
5865 	res->numtxmfrm  = be32_to_cpu(rsp->res.numtxmfrm);
5866 	res->numosffrm  = be32_to_cpu(rsp->res.numosffrm);
5867 	res->numrcvfrm  = be32_to_cpu(rsp->res.numrcvfrm);
5868 	res->badfrminf  = be32_to_cpu(rsp->res.badfrminf);
5869 	res->badfrmnum  = be32_to_cpu(rsp->res.badfrmnum);
5870 	res->status     = rsp->res.status;
5871 	fcdiag->lb.status = rsp->res.status;
5872 	bfa_trc(fcdiag, fcdiag->lb.status);
5873 	fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
5874 	fcdiag->lb.lock = 0;
5875 	bfa_fcdiag_set_busy_status(fcdiag);
5876 }
5877 
5878 static bfa_status_t
5879 bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag,
5880 			struct bfa_diag_loopback_s *loopback)
5881 {
5882 	struct bfi_diag_lb_req_s *lb_req;
5883 
5884 	lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG);
5885 	if (!lb_req)
5886 		return BFA_STATUS_DEVBUSY;
5887 
5888 	/* build host command */
5889 	bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK,
5890 		bfa_fn_lpu(fcdiag->bfa));
5891 
5892 	lb_req->lb_mode = loopback->lb_mode;
5893 	lb_req->speed = loopback->speed;
5894 	lb_req->loopcnt = loopback->loopcnt;
5895 	lb_req->pattern = loopback->pattern;
5896 
5897 	/* ring door bell */
5898 	bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh);
5899 
5900 	bfa_trc(fcdiag, loopback->lb_mode);
5901 	bfa_trc(fcdiag, loopback->speed);
5902 	bfa_trc(fcdiag, loopback->loopcnt);
5903 	bfa_trc(fcdiag, loopback->pattern);
5904 	return BFA_STATUS_OK;
5905 }
5906 
5907 /*
5908  *	cpe/rme intr handler
5909  */
5910 void
5911 bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5912 {
5913 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5914 
5915 	switch (msg->mhdr.msg_id) {
5916 	case BFI_DIAG_I2H_LOOPBACK:
5917 		bfa_fcdiag_loopback_comp(fcdiag,
5918 				(struct bfi_diag_lb_rsp_s *) msg);
5919 		break;
5920 	case BFI_DIAG_I2H_QTEST:
5921 		bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
5922 		break;
5923 	case BFI_DIAG_I2H_DPORT:
5924 		bfa_dport_req_comp(&fcdiag->dport,
5925 				(struct bfi_diag_dport_rsp_s *)msg);
5926 		break;
5927 	case BFI_DIAG_I2H_DPORT_SCN:
5928 		bfa_dport_scn(&fcdiag->dport,
5929 				(struct bfi_diag_dport_scn_s *)msg);
5930 		break;
5931 	default:
5932 		bfa_trc(fcdiag, msg->mhdr.msg_id);
5933 		WARN_ON(1);
5934 	}
5935 }
5936 
5937 /*
5938  *	Loopback test
5939  *
5940  *   @param[in] *bfa            - bfa data struct
5941  *   @param[in] opmode          - port operation mode
5942  *   @param[in] speed           - port speed
5943  *   @param[in] lpcnt           - loop count
5944  *   @param[in] pat                     - pattern to build packet
5945  *   @param[in] *result         - pt to bfa_diag_loopback_result_t data struct
5946  *   @param[in] cbfn            - callback function
 *   @param[in] cbarg           - callback function arg
5948  *
5949  *   @param[out]
5950  */
5951 bfa_status_t
5952 bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
5953 		enum bfa_port_speed speed, u32 lpcnt, u32 pat,
5954 		struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn,
5955 		void *cbarg)
5956 {
5957 	struct  bfa_diag_loopback_s loopback;
5958 	struct bfa_port_attr_s attr;
5959 	bfa_status_t status;
5960 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5961 
5962 	if (!bfa_iocfc_is_operational(bfa))
5963 		return BFA_STATUS_IOC_NON_OP;
5964 
5965 	/* if port is PBC disabled, return error */
5966 	if (bfa_fcport_is_pbcdisabled(bfa)) {
5967 		bfa_trc(fcdiag, BFA_STATUS_PBC);
5968 		return BFA_STATUS_PBC;
5969 	}
5970 
5971 	if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) {
5972 		bfa_trc(fcdiag, opmode);
5973 		return BFA_STATUS_PORT_NOT_DISABLED;
5974 	}
5975 
5976 	/*
5977 	 * Check if input speed is supported by the port mode
5978 	 */
5979 	if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
5980 		if (!(speed == BFA_PORT_SPEED_1GBPS ||
5981 		      speed == BFA_PORT_SPEED_2GBPS ||
5982 		      speed == BFA_PORT_SPEED_4GBPS ||
5983 		      speed == BFA_PORT_SPEED_8GBPS ||
5984 		      speed == BFA_PORT_SPEED_16GBPS ||
5985 		      speed == BFA_PORT_SPEED_AUTO)) {
5986 			bfa_trc(fcdiag, speed);
5987 			return BFA_STATUS_UNSUPP_SPEED;
5988 		}
5989 		bfa_fcport_get_attr(bfa, &attr);
5990 		bfa_trc(fcdiag, attr.speed_supported);
5991 		if (speed > attr.speed_supported)
5992 			return BFA_STATUS_UNSUPP_SPEED;
5993 	} else {
5994 		if (speed != BFA_PORT_SPEED_10GBPS) {
5995 			bfa_trc(fcdiag, speed);
5996 			return BFA_STATUS_UNSUPP_SPEED;
5997 		}
5998 	}
5999 
6000 	/*
6001 	 * For CT2, 1G is not supported
6002 	 */
6003 	if ((speed == BFA_PORT_SPEED_1GBPS) &&
6004 	    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) {
6005 		bfa_trc(fcdiag, speed);
6006 		return BFA_STATUS_UNSUPP_SPEED;
6007 	}
6008 
6009 	/* For Mezz card, port speed entered needs to be checked */
6010 	if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
6011 		if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
6012 			if (!(speed == BFA_PORT_SPEED_1GBPS ||
6013 			      speed == BFA_PORT_SPEED_2GBPS ||
6014 			      speed == BFA_PORT_SPEED_4GBPS ||
6015 			      speed == BFA_PORT_SPEED_8GBPS ||
6016 			      speed == BFA_PORT_SPEED_16GBPS ||
6017 			      speed == BFA_PORT_SPEED_AUTO))
6018 				return BFA_STATUS_UNSUPP_SPEED;
6019 		} else {
6020 			if (speed != BFA_PORT_SPEED_10GBPS)
6021 				return BFA_STATUS_UNSUPP_SPEED;
6022 		}
6023 	}
6024 	/* check to see if fcport is dport */
6025 	if (bfa_fcport_is_dport(bfa)) {
6026 		bfa_trc(fcdiag, fcdiag->lb.lock);
6027 		return BFA_STATUS_DPORT_ENABLED;
6028 	}
6029 	/* check to see if there is another destructive diag cmd running */
6030 	if (fcdiag->lb.lock) {
6031 		bfa_trc(fcdiag, fcdiag->lb.lock);
6032 		return BFA_STATUS_DEVBUSY;
6033 	}
6034 
6035 	fcdiag->lb.lock = 1;
6036 	loopback.lb_mode = opmode;
6037 	loopback.speed = speed;
6038 	loopback.loopcnt = lpcnt;
6039 	loopback.pattern = pat;
6040 	fcdiag->lb.result = result;
6041 	fcdiag->lb.cbfn = cbfn;
6042 	fcdiag->lb.cbarg = cbarg;
6043 	memset(result, 0, sizeof(struct bfa_diag_loopback_result_s));
6044 	bfa_fcdiag_set_busy_status(fcdiag);
6045 
6046 	/* Send msg to fw */
6047 	status = bfa_fcdiag_loopback_send(fcdiag, &loopback);
6048 	return status;
6049 }
6050 
6051 /*
6052  *	DIAG queue test command
6053  *
6054  *   @param[in] *bfa            - bfa data struct
6055  *   @param[in] force           - 1: don't do ioc op checking
6056  *   @param[in] queue           - queue no. to test
6057  *   @param[in] *result         - pt to bfa_diag_qtest_result_t data struct
6058  *   @param[in] cbfn            - callback function
 *   @param[in] *cbarg          - callback function arg
6060  *
6061  *   @param[out]
6062  */
6063 bfa_status_t
6064 bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
6065 		struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn,
6066 		void *cbarg)
6067 {
6068 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6069 	bfa_status_t status;
6070 	bfa_trc(fcdiag, force);
6071 	bfa_trc(fcdiag, queue);
6072 
6073 	if (!force && !bfa_iocfc_is_operational(bfa))
6074 		return BFA_STATUS_IOC_NON_OP;
6075 
6076 	/* check to see if there is another destructive diag cmd running */
6077 	if (fcdiag->qtest.lock) {
6078 		bfa_trc(fcdiag, fcdiag->qtest.lock);
6079 		return BFA_STATUS_DEVBUSY;
6080 	}
6081 
6082 	/* Initialization */
6083 	fcdiag->qtest.lock = 1;
6084 	fcdiag->qtest.cbfn = cbfn;
6085 	fcdiag->qtest.cbarg = cbarg;
6086 	fcdiag->qtest.result = result;
6087 	fcdiag->qtest.count = QTEST_CNT_DEFAULT;
6088 
6089 	/* Init test results */
6090 	fcdiag->qtest.result->status = BFA_STATUS_OK;
6091 	fcdiag->qtest.result->count  = 0;
6092 
6093 	/* send */
6094 	if (queue < BFI_IOC_MAX_CQS) {
6095 		fcdiag->qtest.result->queue  = (u8)queue;
6096 		fcdiag->qtest.queue = (u8)queue;
6097 		fcdiag->qtest.all   = 0;
6098 	} else {
6099 		fcdiag->qtest.result->queue  = 0;
6100 		fcdiag->qtest.queue = 0;
6101 		fcdiag->qtest.all   = 1;
6102 	}
6103 	status = bfa_fcdiag_queuetest_send(fcdiag);
6104 
6105 	/* Start a timer */
6106 	if (status == BFA_STATUS_OK) {
6107 		bfa_timer_start(bfa, &fcdiag->qtest.timer,
6108 				bfa_fcdiag_queuetest_timeout, fcdiag,
6109 				BFA_DIAG_QTEST_TOV);
6110 		fcdiag->qtest.timer_active = 1;
6111 	}
6112 	return status;
6113 }
6114 
6115 /*
6116  * DIAG PLB is running
6117  *
6118  *   @param[in] *bfa    - bfa data struct
6119  *
6120  *   @param[out]
6121  */
6122 bfa_status_t
6123 bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
6124 {
6125 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6126 	return fcdiag->lb.lock ?  BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
6127 }
6128 
6129 /*
6130  *	D-port
6131  */
/*
 * Snapshot the parameters of a d-port test run into __dport->result
 * and mark the test as in progress.
 */
#define bfa_dport_result_start(__dport, __mode) do {				\
		(__dport)->result.start_time = ktime_get_real_seconds();	\
		(__dport)->result.status = DPORT_TEST_ST_INPRG;			\
		(__dport)->result.mode = (__mode);				\
		(__dport)->result.rp_pwwn = (__dport)->rp_pwwn;			\
		(__dport)->result.rp_nwwn = (__dport)->rp_nwwn;			\
		(__dport)->result.lpcnt = (__dport)->lpcnt;			\
} while (0)
6140 
/* Forward declaration: defined after the state-machine handlers below */
static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
					enum bfi_dport_req req);
6143 static void
6144 bfa_cb_fcdiag_dport(struct bfa_dport_s *dport, bfa_status_t bfa_status)
6145 {
6146 	if (dport->cbfn != NULL) {
6147 		dport->cbfn(dport->cbarg, bfa_status);
6148 		dport->cbfn = NULL;
6149 		dport->cbarg = NULL;
6150 	}
6151 }
6152 
/*
 * State: d-port disabled (normal FC mode, no test configured).
 *
 * ENABLE sends (or queues) a firmware enable request.  A DDPORT_ENABLE
 * SCN means the remote end dynamically switched this port into d-port
 * mode, so move straight to enabled.
 */
static void
bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_ENABLE:
		bfa_fcport_dportenable(dport->bfa);
		if (bfa_dport_send_req(dport, BFI_DPORT_ENABLE))
			bfa_sm_set_state(dport, bfa_dport_sm_enabling);
		else
			/* no reqq space: wait for QRESUME to resend */
			bfa_sm_set_state(dport, bfa_dport_sm_enabling_qwait);
		break;

	case BFA_DPORT_SM_DISABLE:
		/* Already disabled */
		break;

	case BFA_DPORT_SM_HWFAIL:
		/* ignore */
		break;

	case BFA_DPORT_SM_SCN:
		if (dport->i2hmsg.scn.state ==  BFI_DPORT_SCN_DDPORT_ENABLE) {
			bfa_fcport_ddportenable(dport->bfa);
			dport->dynamic = BFA_TRUE;
			dport->test_state = BFA_DPORT_ST_NOTSTART;
			bfa_sm_set_state(dport, bfa_dport_sm_enabled);
		} else {
			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
			WARN_ON(1);
		}
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6191 
/*
 * State: enable request pending, waiting for request-queue space.
 * QRESUME retries the enable; HWFAIL cancels the wait and fails the
 * caller's request.
 */
static void
bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
			    enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_QRESUME:
		bfa_sm_set_state(dport, bfa_dport_sm_enabling);
		bfa_dport_send_req(dport, BFI_DPORT_ENABLE);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&dport->reqq_wait);
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6214 
/*
 * State: enable request sent to firmware, awaiting the response.
 * FWRSP moves to enabled (test state depends on SFP presence);
 * REQFAIL rolls the port back to disabled.
 */
static void
bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_FWRSP:
		memset(&dport->result, 0,
				sizeof(struct bfa_diag_dport_result_s));
		if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) {
			dport->test_state = BFA_DPORT_ST_NO_SFP;
		} else {
			dport->test_state = BFA_DPORT_ST_INP;
			bfa_dport_result_start(dport, BFA_DPORT_OPMODE_AUTO);
		}
		bfa_sm_set_state(dport, bfa_dport_sm_enabled);
		break;

	case BFA_DPORT_SM_REQFAIL:
		dport->test_state = BFA_DPORT_ST_DISABLED;
		bfa_fcport_dportdisable(dport->bfa);
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6248 
/*
 * State: d-port enabled.  START and DISABLE kick off the respective
 * firmware requests (or queue-wait on a full reqq).  SCN events track
 * test progress, and DDPORT/FCPORT disable notifications tear down a
 * dynamically enabled d-port.
 */
static void
bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_START:
		if (bfa_dport_send_req(dport, BFI_DPORT_START))
			bfa_sm_set_state(dport, bfa_dport_sm_starting);
		else
			bfa_sm_set_state(dport, bfa_dport_sm_starting_qwait);
		break;

	case BFA_DPORT_SM_DISABLE:
		bfa_fcport_dportdisable(dport->bfa);
		if (bfa_dport_send_req(dport, BFI_DPORT_DISABLE))
			bfa_sm_set_state(dport, bfa_dport_sm_disabling);
		else
			bfa_sm_set_state(dport, bfa_dport_sm_disabling_qwait);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		break;

	case BFA_DPORT_SM_SCN:
		switch (dport->i2hmsg.scn.state) {
		case BFI_DPORT_SCN_TESTCOMP:
			dport->test_state = BFA_DPORT_ST_COMP;
			break;

		case BFI_DPORT_SCN_TESTSTART:
			dport->test_state = BFA_DPORT_ST_INP;
			break;

		case BFI_DPORT_SCN_TESTSKIP:
		case BFI_DPORT_SCN_SUBTESTSTART:
			/* no state change */
			break;

		case BFI_DPORT_SCN_SFP_REMOVED:
			dport->test_state = BFA_DPORT_ST_NO_SFP;
			break;

		case BFI_DPORT_SCN_DDPORT_DISABLE:
			bfa_fcport_ddportdisable(dport->bfa);

			/* remote end left d-port mode; disable dynamically */
			if (bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE))
				bfa_sm_set_state(dport,
					 bfa_dport_sm_dynamic_disabling);
			else
				bfa_sm_set_state(dport,
					 bfa_dport_sm_dynamic_disabling_qwait);
			break;

		case BFI_DPORT_SCN_FCPORT_DISABLE:
			bfa_fcport_ddportdisable(dport->bfa);

			bfa_sm_set_state(dport, bfa_dport_sm_disabled);
			dport->dynamic = BFA_FALSE;
			break;

		default:
			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
			bfa_sm_fault(dport->bfa, event);
		}
		break;
	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6320 
/*
 * State: disable request pending, waiting for request-queue space.
 * QRESUME retries the disable; SCN events are ignored while shutting
 * down.
 */
static void
bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
			     enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_QRESUME:
		bfa_sm_set_state(dport, bfa_dport_sm_disabling);
		bfa_dport_send_req(dport, BFI_DPORT_DISABLE);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_reqq_wcancel(&dport->reqq_wait);
		/* IOC failure implies the port is down anyway: report OK */
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
		break;

	case BFA_DPORT_SM_SCN:
		/* ignore */
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6347 
/*
 * State: disable request sent to firmware, awaiting the response.
 */
static void
bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_FWRSP:
		dport->test_state = BFA_DPORT_ST_DISABLED;
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		/* IOC failure implies the port is down anyway: report OK */
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
		break;

	case BFA_DPORT_SM_SCN:
		/* no state change */
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6372 
/*
 * State: start request pending, waiting for request-queue space.
 */
static void
bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport,
			    enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_QRESUME:
		bfa_sm_set_state(dport, bfa_dport_sm_starting);
		bfa_dport_send_req(dport, BFI_DPORT_START);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&dport->reqq_wait);
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6395 
/*
 * State: start request sent to firmware, awaiting the response.
 * FWRSP records the new test state and then deliberately falls through
 * to the REQFAIL transition back to enabled.
 */
static void
bfa_dport_sm_starting(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_FWRSP:
		memset(&dport->result, 0,
				sizeof(struct bfa_diag_dport_result_s));
		if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) {
			dport->test_state = BFA_DPORT_ST_NO_SFP;
		} else {
			dport->test_state = BFA_DPORT_ST_INP;
			bfa_dport_result_start(dport, BFA_DPORT_OPMODE_MANU);
		}
		/* fall thru */

	case BFA_DPORT_SM_REQFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_enabled);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6426 
/*
 * State: dynamic-disable request sent (remote end left d-port mode);
 * waiting for the DDPORT_DISABLED notification, after which the normal
 * FC port is re-enabled.
 */
static void
bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport,
			       enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_SCN:
		switch (dport->i2hmsg.scn.state) {
		case BFI_DPORT_SCN_DDPORT_DISABLED:
			bfa_sm_set_state(dport, bfa_dport_sm_disabled);
			dport->dynamic = BFA_FALSE;
			bfa_fcport_enable(dport->bfa);
			break;

		default:
			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
			bfa_sm_fault(dport->bfa, event);

		}
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6458 
/*
 * State: dynamic-disable request pending, waiting for request-queue
 * space.  QRESUME retries the request; SCN events are ignored.
 */
static void
bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport,
			    enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_QRESUME:
		bfa_sm_set_state(dport, bfa_dport_sm_dynamic_disabling);
		bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_reqq_wcancel(&dport->reqq_wait);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
		break;

	case BFA_DPORT_SM_SCN:
		/* ignore */
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6485 
6486 static bfa_boolean_t
6487 bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req)
6488 {
6489 	struct bfi_diag_dport_req_s *m;
6490 
6491 	/*
6492 	 * check for room in queue to send request now
6493 	 */
6494 	m = bfa_reqq_next(dport->bfa, BFA_REQQ_DIAG);
6495 	if (!m) {
6496 		bfa_reqq_wait(dport->bfa, BFA_REQQ_PORT, &dport->reqq_wait);
6497 		return BFA_FALSE;
6498 	}
6499 
6500 	bfi_h2i_set(m->mh, BFI_MC_DIAG, BFI_DIAG_H2I_DPORT,
6501 		    bfa_fn_lpu(dport->bfa));
6502 	m->req  = req;
6503 	if ((req == BFI_DPORT_ENABLE) || (req == BFI_DPORT_START)) {
6504 		m->lpcnt = cpu_to_be32(dport->lpcnt);
6505 		m->payload = cpu_to_be32(dport->payload);
6506 	}
6507 
6508 	/*
6509 	 * queue I/O message to firmware
6510 	 */
6511 	bfa_reqq_produce(dport->bfa, BFA_REQQ_DIAG, m->mh);
6512 
6513 	return BFA_TRUE;
6514 }
6515 
6516 static void
6517 bfa_dport_qresume(void *cbarg)
6518 {
6519 	struct bfa_dport_s *dport = cbarg;
6520 
6521 	bfa_sm_send_event(dport, BFA_DPORT_SM_QRESUME);
6522 }
6523 
6524 static void
6525 bfa_dport_req_comp(struct bfa_dport_s *dport, struct bfi_diag_dport_rsp_s *msg)
6526 {
6527 	msg->status = cpu_to_be32(msg->status);
6528 	dport->i2hmsg.rsp.status = msg->status;
6529 	dport->rp_pwwn = msg->pwwn;
6530 	dport->rp_nwwn = msg->nwwn;
6531 
6532 	if ((msg->status == BFA_STATUS_OK) ||
6533 	    (msg->status == BFA_STATUS_DPORT_NO_SFP)) {
6534 		bfa_trc(dport->bfa, msg->status);
6535 		bfa_trc(dport->bfa, dport->rp_pwwn);
6536 		bfa_trc(dport->bfa, dport->rp_nwwn);
6537 		bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP);
6538 
6539 	} else {
6540 		bfa_trc(dport->bfa, msg->status);
6541 		bfa_sm_send_event(dport, BFA_DPORT_SM_REQFAIL);
6542 	}
6543 	bfa_cb_fcdiag_dport(dport, msg->status);
6544 }
6545 
6546 static bfa_boolean_t
6547 bfa_dport_is_sending_req(struct bfa_dport_s *dport)
6548 {
6549 	if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling)	||
6550 	    bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
6551 	    bfa_sm_cmp_state(dport, bfa_dport_sm_disabling)	||
6552 	    bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait) ||
6553 	    bfa_sm_cmp_state(dport, bfa_dport_sm_starting)	||
6554 	    bfa_sm_cmp_state(dport, bfa_dport_sm_starting_qwait)) {
6555 		return BFA_TRUE;
6556 	} else {
6557 		return BFA_FALSE;
6558 	}
6559 }
6560 
6561 static void
6562 bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
6563 {
6564 	int i;
6565 	uint8_t subtesttype;
6566 
6567 	bfa_trc(dport->bfa, msg->state);
6568 	dport->i2hmsg.scn.state = msg->state;
6569 
6570 	switch (dport->i2hmsg.scn.state) {
6571 	case BFI_DPORT_SCN_TESTCOMP:
6572 		dport->result.end_time = ktime_get_real_seconds();
6573 		bfa_trc(dport->bfa, dport->result.end_time);
6574 
6575 		dport->result.status = msg->info.testcomp.status;
6576 		bfa_trc(dport->bfa, dport->result.status);
6577 
6578 		dport->result.roundtrip_latency =
6579 			cpu_to_be32(msg->info.testcomp.latency);
6580 		dport->result.est_cable_distance =
6581 			cpu_to_be32(msg->info.testcomp.distance);
6582 		dport->result.buffer_required =
6583 			be16_to_cpu(msg->info.testcomp.numbuffer);
6584 
6585 		dport->result.frmsz = be16_to_cpu(msg->info.testcomp.frm_sz);
6586 		dport->result.speed = msg->info.testcomp.speed;
6587 
6588 		bfa_trc(dport->bfa, dport->result.roundtrip_latency);
6589 		bfa_trc(dport->bfa, dport->result.est_cable_distance);
6590 		bfa_trc(dport->bfa, dport->result.buffer_required);
6591 		bfa_trc(dport->bfa, dport->result.frmsz);
6592 		bfa_trc(dport->bfa, dport->result.speed);
6593 
6594 		for (i = DPORT_TEST_ELOOP; i < DPORT_TEST_MAX; i++) {
6595 			dport->result.subtest[i].status =
6596 				msg->info.testcomp.subtest_status[i];
6597 			bfa_trc(dport->bfa, dport->result.subtest[i].status);
6598 		}
6599 		break;
6600 
6601 	case BFI_DPORT_SCN_TESTSKIP:
6602 	case BFI_DPORT_SCN_DDPORT_ENABLE:
6603 		memset(&dport->result, 0,
6604 				sizeof(struct bfa_diag_dport_result_s));
6605 		break;
6606 
6607 	case BFI_DPORT_SCN_TESTSTART:
6608 		memset(&dport->result, 0,
6609 				sizeof(struct bfa_diag_dport_result_s));
6610 		dport->rp_pwwn = msg->info.teststart.pwwn;
6611 		dport->rp_nwwn = msg->info.teststart.nwwn;
6612 		dport->lpcnt = cpu_to_be32(msg->info.teststart.numfrm);
6613 		bfa_dport_result_start(dport, msg->info.teststart.mode);
6614 		break;
6615 
6616 	case BFI_DPORT_SCN_SUBTESTSTART:
6617 		subtesttype = msg->info.teststart.type;
6618 		dport->result.subtest[subtesttype].start_time =
6619 			ktime_get_real_seconds();
6620 		dport->result.subtest[subtesttype].status =
6621 			DPORT_TEST_ST_INPRG;
6622 
6623 		bfa_trc(dport->bfa, subtesttype);
6624 		bfa_trc(dport->bfa,
6625 			dport->result.subtest[subtesttype].start_time);
6626 		break;
6627 
6628 	case BFI_DPORT_SCN_SFP_REMOVED:
6629 	case BFI_DPORT_SCN_DDPORT_DISABLED:
6630 	case BFI_DPORT_SCN_DDPORT_DISABLE:
6631 	case BFI_DPORT_SCN_FCPORT_DISABLE:
6632 		dport->result.status = DPORT_TEST_ST_IDLE;
6633 		break;
6634 
6635 	default:
6636 		bfa_sm_fault(dport->bfa, msg->state);
6637 	}
6638 
6639 	bfa_sm_send_event(dport, BFA_DPORT_SM_SCN);
6640 }
6641 
6642 /*
6643  * Dport enable
6644  *
6645  * @param[in] *bfa            - bfa data struct
6646  */
6647 bfa_status_t
6648 bfa_dport_enable(struct bfa_s *bfa, u32 lpcnt, u32 pat,
6649 				bfa_cb_diag_t cbfn, void *cbarg)
6650 {
6651 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6652 	struct bfa_dport_s  *dport = &fcdiag->dport;
6653 
6654 	/*
6655 	 * Dport is not support in MEZZ card
6656 	 */
6657 	if (bfa_mfg_is_mezz(dport->bfa->ioc.attr->card_type)) {
6658 		bfa_trc(dport->bfa, BFA_STATUS_PBC);
6659 		return BFA_STATUS_CMD_NOTSUPP_MEZZ;
6660 	}
6661 
6662 	/*
6663 	 * Dport is supported in CT2 or above
6664 	 */
6665 	if (!(bfa_asic_id_ct2(dport->bfa->ioc.pcidev.device_id))) {
6666 		bfa_trc(dport->bfa, dport->bfa->ioc.pcidev.device_id);
6667 		return BFA_STATUS_FEATURE_NOT_SUPPORTED;
6668 	}
6669 
6670 	/*
6671 	 * Check to see if IOC is down
6672 	*/
6673 	if (!bfa_iocfc_is_operational(bfa))
6674 		return BFA_STATUS_IOC_NON_OP;
6675 
6676 	/* if port is PBC disabled, return error */
6677 	if (bfa_fcport_is_pbcdisabled(bfa)) {
6678 		bfa_trc(dport->bfa, BFA_STATUS_PBC);
6679 		return BFA_STATUS_PBC;
6680 	}
6681 
6682 	/*
6683 	 * Check if port mode is FC port
6684 	 */
6685 	if (bfa_ioc_get_type(&bfa->ioc) != BFA_IOC_TYPE_FC) {
6686 		bfa_trc(dport->bfa, bfa_ioc_get_type(&bfa->ioc));
6687 		return BFA_STATUS_CMD_NOTSUPP_CNA;
6688 	}
6689 
6690 	/*
6691 	 * Check if port is in LOOP mode
6692 	 */
6693 	if ((bfa_fcport_get_cfg_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) ||
6694 	    (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)) {
6695 		bfa_trc(dport->bfa, 0);
6696 		return BFA_STATUS_TOPOLOGY_LOOP;
6697 	}
6698 
6699 	/*
6700 	 * Check if port is TRUNK mode
6701 	 */
6702 	if (bfa_fcport_is_trunk_enabled(bfa)) {
6703 		bfa_trc(dport->bfa, 0);
6704 		return BFA_STATUS_ERROR_TRUNK_ENABLED;
6705 	}
6706 
6707 	/*
6708 	 * Check if diag loopback is running
6709 	 */
6710 	if (bfa_fcdiag_lb_is_running(bfa)) {
6711 		bfa_trc(dport->bfa, 0);
6712 		return BFA_STATUS_DIAG_BUSY;
6713 	}
6714 
6715 	/*
6716 	 * Check to see if port is disable or in dport state
6717 	 */
6718 	if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
6719 	    (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
6720 		bfa_trc(dport->bfa, 0);
6721 		return BFA_STATUS_PORT_NOT_DISABLED;
6722 	}
6723 
6724 	/*
6725 	 * Check if dport is in dynamic mode
6726 	 */
6727 	if (dport->dynamic)
6728 		return BFA_STATUS_DDPORT_ERR;
6729 
6730 	/*
6731 	 * Check if dport is busy
6732 	 */
6733 	if (bfa_dport_is_sending_req(dport))
6734 		return BFA_STATUS_DEVBUSY;
6735 
6736 	/*
6737 	 * Check if dport is already enabled
6738 	 */
6739 	if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
6740 		bfa_trc(dport->bfa, 0);
6741 		return BFA_STATUS_DPORT_ENABLED;
6742 	}
6743 
6744 	bfa_trc(dport->bfa, lpcnt);
6745 	bfa_trc(dport->bfa, pat);
6746 	dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT;
6747 	dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT;
6748 	dport->cbfn = cbfn;
6749 	dport->cbarg = cbarg;
6750 
6751 	bfa_sm_send_event(dport, BFA_DPORT_SM_ENABLE);
6752 	return BFA_STATUS_OK;
6753 }
6754 
6755 /*
6756  *	Dport disable
6757  *
6758  *	@param[in] *bfa            - bfa data struct
6759  */
6760 bfa_status_t
6761 bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
6762 {
6763 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6764 	struct bfa_dport_s *dport = &fcdiag->dport;
6765 
6766 	if (bfa_ioc_is_disabled(&bfa->ioc))
6767 		return BFA_STATUS_IOC_DISABLED;
6768 
6769 	/* if port is PBC disabled, return error */
6770 	if (bfa_fcport_is_pbcdisabled(bfa)) {
6771 		bfa_trc(dport->bfa, BFA_STATUS_PBC);
6772 		return BFA_STATUS_PBC;
6773 	}
6774 
6775 	/*
6776 	 * Check if dport is in dynamic mode
6777 	 */
6778 	if (dport->dynamic) {
6779 		return BFA_STATUS_DDPORT_ERR;
6780 	}
6781 
6782 	/*
6783 	 * Check to see if port is disable or in dport state
6784 	 */
6785 	if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
6786 	    (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
6787 		bfa_trc(dport->bfa, 0);
6788 		return BFA_STATUS_PORT_NOT_DISABLED;
6789 	}
6790 
6791 	/*
6792 	 * Check if dport is busy
6793 	 */
6794 	if (bfa_dport_is_sending_req(dport))
6795 		return BFA_STATUS_DEVBUSY;
6796 
6797 	/*
6798 	 * Check if dport is already disabled
6799 	 */
6800 	if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) {
6801 		bfa_trc(dport->bfa, 0);
6802 		return BFA_STATUS_DPORT_DISABLED;
6803 	}
6804 
6805 	dport->cbfn = cbfn;
6806 	dport->cbarg = cbarg;
6807 
6808 	bfa_sm_send_event(dport, BFA_DPORT_SM_DISABLE);
6809 	return BFA_STATUS_OK;
6810 }
6811 
6812 /*
6813  * Dport start -- restart dport test
6814  *
6815  *   @param[in] *bfa		- bfa data struct
6816  */
6817 bfa_status_t
6818 bfa_dport_start(struct bfa_s *bfa, u32 lpcnt, u32 pat,
6819 			bfa_cb_diag_t cbfn, void *cbarg)
6820 {
6821 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6822 	struct bfa_dport_s *dport = &fcdiag->dport;
6823 
6824 	/*
6825 	 * Check to see if IOC is down
6826 	 */
6827 	if (!bfa_iocfc_is_operational(bfa))
6828 		return BFA_STATUS_IOC_NON_OP;
6829 
6830 	/*
6831 	 * Check if dport is in dynamic mode
6832 	 */
6833 	if (dport->dynamic)
6834 		return BFA_STATUS_DDPORT_ERR;
6835 
6836 	/*
6837 	 * Check if dport is busy
6838 	 */
6839 	if (bfa_dport_is_sending_req(dport))
6840 		return BFA_STATUS_DEVBUSY;
6841 
6842 	/*
6843 	 * Check if dport is in enabled state.
6844 	 * Test can only be restart when previous test has completed
6845 	 */
6846 	if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
6847 		bfa_trc(dport->bfa, 0);
6848 		return BFA_STATUS_DPORT_DISABLED;
6849 
6850 	} else {
6851 		if (dport->test_state == BFA_DPORT_ST_NO_SFP)
6852 			return BFA_STATUS_DPORT_INV_SFP;
6853 
6854 		if (dport->test_state == BFA_DPORT_ST_INP)
6855 			return BFA_STATUS_DEVBUSY;
6856 
6857 		WARN_ON(dport->test_state != BFA_DPORT_ST_COMP);
6858 	}
6859 
6860 	bfa_trc(dport->bfa, lpcnt);
6861 	bfa_trc(dport->bfa, pat);
6862 
6863 	dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT;
6864 	dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT;
6865 
6866 	dport->cbfn = cbfn;
6867 	dport->cbarg = cbarg;
6868 
6869 	bfa_sm_send_event(dport, BFA_DPORT_SM_START);
6870 	return BFA_STATUS_OK;
6871 }
6872 
6873 /*
6874  * Dport show -- return dport test result
6875  *
6876  *   @param[in] *bfa		- bfa data struct
6877  */
6878 bfa_status_t
6879 bfa_dport_show(struct bfa_s *bfa, struct bfa_diag_dport_result_s *result)
6880 {
6881 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6882 	struct bfa_dport_s *dport = &fcdiag->dport;
6883 
6884 	/*
6885 	 * Check to see if IOC is down
6886 	 */
6887 	if (!bfa_iocfc_is_operational(bfa))
6888 		return BFA_STATUS_IOC_NON_OP;
6889 
6890 	/*
6891 	 * Check if dport is busy
6892 	 */
6893 	if (bfa_dport_is_sending_req(dport))
6894 		return BFA_STATUS_DEVBUSY;
6895 
6896 	/*
6897 	 * Check if dport is in enabled state.
6898 	 */
6899 	if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
6900 		bfa_trc(dport->bfa, 0);
6901 		return BFA_STATUS_DPORT_DISABLED;
6902 
6903 	}
6904 
6905 	/*
6906 	 * Check if there is SFP
6907 	 */
6908 	if (dport->test_state == BFA_DPORT_ST_NO_SFP)
6909 		return BFA_STATUS_DPORT_INV_SFP;
6910 
6911 	memcpy(result, &dport->result, sizeof(struct bfa_diag_dport_result_s));
6912 
6913 	return BFA_STATUS_OK;
6914 }
6915