/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfa_os_inc.h"
#include "bfa_plog.h"
#include "bfa_cs.h"
#include "bfa_modules.h"
#include "bfad_drv.h"

BFA_TRC_FILE(HAL, FCXP);
BFA_MODULE(fcxp);
BFA_MODULE(sgpg);
BFA_MODULE(lps);
BFA_MODULE(fcport);
BFA_MODULE(rport);
BFA_MODULE(uf);

/*
 * LPS related definitions
 */
#define BFA_LPS_MIN_LPORTS      (1)
#define BFA_LPS_MAX_LPORTS      (256)

/*
 * Maximum Vports supported per physical port or vf.
 */
#define BFA_LPS_MAX_VPORTS_SUPP_CB  255
#define BFA_LPS_MAX_VPORTS_SUPP_CT  190

/*
 *  lps_pvt BFA LPS private functions
 */

enum bfa_lps_event {
	BFA_LPS_SM_LOGIN	= 1,	/* login request from user	*/
	BFA_LPS_SM_LOGOUT	= 2,	/* logout request from user	*/
	BFA_LPS_SM_FWRSP	= 3,	/* f/w response to login/logout	*/
	BFA_LPS_SM_RESUME	= 4,	/* space present in reqq queue	*/
	BFA_LPS_SM_DELETE	= 5,	/* lps delete from user		*/
	BFA_LPS_SM_OFFLINE	= 6,	/* Link is offline		*/
	BFA_LPS_SM_RX_CVL	= 7,	/* Rx clear virtual link	*/
};

/*
 * FC PORT related definitions
 */
/*
 * The port is considered disabled if the corresponding physical port or
 * IOC is disabled explicitly
 */
#define BFA_PORT_IS_DISABLED(bfa) \
	((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
	(bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))


/*
 * BFA port state machine events
 */
enum bfa_fcport_sm_event {
	BFA_FCPORT_SM_START	= 1,	/*  start port state machine	*/
	BFA_FCPORT_SM_STOP	= 2,	/*  stop port state machine	*/
	BFA_FCPORT_SM_ENABLE	= 3,	/*  enable port		*/
	BFA_FCPORT_SM_DISABLE	= 4,	/*  disable port state machine */
	BFA_FCPORT_SM_FWRSP	= 5,	/*  firmware enable/disable rsp */
	BFA_FCPORT_SM_LINKUP	= 6,	/*  firmware linkup event	*/
	BFA_FCPORT_SM_LINKDOWN	= 7,	/*  firmware linkdown event	*/
	BFA_FCPORT_SM_QRESUME	= 8,	/*  CQ space available	*/
	BFA_FCPORT_SM_HWFAIL	= 9,	/*  IOC h/w failure		*/
};

/*
 * BFA port link notification state machine events
 */

enum bfa_fcport_ln_sm_event {
	BFA_FCPORT_LN_SM_LINKUP		= 1,	/*  linkup event	*/
	BFA_FCPORT_LN_SM_LINKDOWN	= 2,	/*  linkdown event	*/
	BFA_FCPORT_LN_SM_NOTIFICATION	= 3	/*  done notification	*/
};

/*
 * RPORT related definitions
 */
#define bfa_rport_offline_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_offline((__rp)->rport_drv);      \
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_offline, (__rp));      \
	}								\
} while (0)

#define bfa_rport_online_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_online((__rp)->rport_drv);      \
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				  __bfa_cb_rport_online, (__rp));      \
	}								\
} while (0)


enum bfa_rport_event {
	BFA_RPORT_SM_CREATE	= 1,	/*  rport create event		*/
	BFA_RPORT_SM_DELETE	= 2,	/*  deleting an existing rport	*/
	BFA_RPORT_SM_ONLINE	= 3,	/*  rport is online		*/
	BFA_RPORT_SM_OFFLINE	= 4,	/*  rport is offline		*/
	BFA_RPORT_SM_FWRSP	= 5,	/*  firmware response		*/
	BFA_RPORT_SM_HWFAIL	= 6,	/*  IOC h/w failure		*/
	BFA_RPORT_SM_QOS_SCN	= 7,	/*  QoS SCN from firmware	*/
	BFA_RPORT_SM_SET_SPEED	= 8,	/*  Set Rport Speed		*/
	BFA_RPORT_SM_QRESUME	= 9,	/*  space in requeue queue	*/
};

/*
 * forward declarations FCXP related functions
 */
static void	__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
static void	hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
				struct bfi_fcxp_send_rsp_s *fcxp_rsp);
static void	hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
				struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
static void	bfa_fcxp_qresume(void *cbarg);
static void	bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
				struct bfi_fcxp_send_req_s *send_req);

/*
 * forward declarations for LPS functions
 */
static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
				u32 *dm_len);
static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
				struct bfa_iocfc_cfg_s *cfg,
				struct bfa_meminfo_s *meminfo,
				struct bfa_pcidev_s *pcidev);
static void bfa_lps_detach(struct bfa_s *bfa);
static void bfa_lps_start(struct bfa_s *bfa);
static void bfa_lps_stop(struct bfa_s *bfa);
static void bfa_lps_iocdisable(struct bfa_s *bfa);
static void bfa_lps_login_rsp(struct bfa_s *bfa,
				struct bfi_lps_login_rsp_s *rsp);
static void bfa_lps_logout_rsp(struct bfa_s *bfa,
				struct bfi_lps_logout_rsp_s *rsp);
static void bfa_lps_reqq_resume(void *lps_arg);
static void bfa_lps_free(struct bfa_lps_s *lps);
static void bfa_lps_send_login(struct bfa_lps_s *lps);
static void bfa_lps_send_logout(struct bfa_lps_s *lps);
static void bfa_lps_login_comp(struct bfa_lps_s *lps);
static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
static void bfa_lps_cvl_event(struct bfa_lps_s *lps);

/*
 * forward declaration for LPS state machine
 */
static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
					event);
static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
					event);

/*
 * forward declaration for FC Port functions
 */
static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
			enum bfa_port_linkstate event, bfa_boolean_t trunk);
static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
				enum bfa_port_linkstate event);
static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
static void bfa_fcport_stats_get_timeout(void *cbarg);
static void bfa_fcport_stats_clr_timeout(void *cbarg);
static void bfa_trunk_iocdisable(struct bfa_s *bfa);

/*
 * forward declaration for FC PORT state machine
 */
static void     bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);

static void     bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void     bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void     bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void     bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void     bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void     bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void     bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);

static struct bfa_sm_table_s hal_port_sm_table[] = {
	{BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
	{BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
	{BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
	{BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
	{BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
	{BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
	{BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
	{BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
	{BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
};


/*
 * forward declaration for RPORT related functions
 */
static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
static void		bfa_rport_free(struct bfa_rport_s *rport);
static bfa_boolean_t	bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
static bfa_boolean_t	bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
static bfa_boolean_t	bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
static void		__bfa_cb_rport_online(void *cbarg,
						bfa_boolean_t complete);
static void		__bfa_cb_rport_offline(void *cbarg,
						bfa_boolean_t complete);

/*
 * forward declaration for RPORT state machine
 */
static void     bfa_rport_sm_uninit(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_created(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_online(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_offline(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_deleting(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
					enum bfa_rport_event event);

/*
 * PLOG related definitions
 */
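/*
 * Sanity check a protocol log record before it is copied into the
 * log buffer: returns 1 for a malformed record, 0 otherwise.
 */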
static int
plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
{
	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
		(pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
		return 1;

	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
		(pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
		return 1;

	return 0;
}

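/*
 * Append a validated record to the circular protocol log, timestamping
 * it and advancing the head pointer when the buffer wraps.
 */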
static void
bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
{
	u16 tail;
	struct bfa_plog_rec_s *pl_recp;

	if (plog->plog_enabled == 0)
		return;

	if (plkd_validate_logrec(pl_rec)) {
		bfa_assert(0);
		return;
	}

	tail = plog->tail;

	pl_recp = &(plog->plog_recs[tail]);

	memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));

	pl_recp->tv = bfa_os_get_log_time();
	BFA_PL_LOG_REC_INCR(plog->tail);

	if (plog->head == plog->tail)
		BFA_PL_LOG_REC_INCR(plog->head);
}

void
bfa_plog_init(struct bfa_plog_s *plog)
{
	memset((char *)plog, 0, sizeof(struct bfa_plog_s));

	memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
	plog->head = plog->tail = 0;
	plog->plog_enabled = 1;
}

void
bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
		enum bfa_plog_eid event,
		u16 misc, char *log_str)
{
	struct bfa_plog_rec_s  lp;

	if (plog->plog_enabled) {
		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
		lp.mid = mid;
		lp.eid = event;
		lp.log_type = BFA_PL_LOG_TYPE_STRING;
		lp.misc = misc;
		strncpy(lp.log_entry.string_log, log_str,
			BFA_PL_STRING_LOG_SZ - 1);
		lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
		bfa_plog_add(plog, &lp);
	}
}

void
bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
		enum bfa_plog_eid event,
		u16 misc, u32 *intarr, u32 num_ints)
{
	struct bfa_plog_rec_s  lp;
	u32 i;

	if (num_ints > BFA_PL_INT_LOG_SZ)
		num_ints = BFA_PL_INT_LOG_SZ;

	if (plog->plog_enabled) {
		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
		lp.mid = mid;
		lp.eid = event;
		lp.log_type = BFA_PL_LOG_TYPE_INT;
		lp.misc = misc;

		for (i = 0; i < num_ints; i++)
			lp.log_entry.int_log[i] = intarr[i];

		lp.log_num_ints = (u8) num_ints;

		bfa_plog_add(plog, &lp);
	}
}

void
bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
			enum bfa_plog_eid event,
			u16 misc, struct fchs_s *fchdr)
{
	struct bfa_plog_rec_s  lp;
	u32	*tmp_int = (u32 *) fchdr;
	u32	ints[BFA_PL_INT_LOG_SZ];

	if (plog->plog_enabled) {
		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));

		ints[0] = tmp_int[0];
		ints[1] = tmp_int[1];
		ints[2] = tmp_int[4];

		bfa_plog_intarr(plog, mid, event, misc, ints, 3);
	}
}

void
bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
		      enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
		      u32 pld_w0)
{
	struct bfa_plog_rec_s  lp;
	u32	*tmp_int = (u32 *) fchdr;
	u32	ints[BFA_PL_INT_LOG_SZ];

	if (plog->plog_enabled) {
		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));

		ints[0] = tmp_int[0];
		ints[1] = tmp_int[1];
		ints[2] = tmp_int[4];
		ints[3] = pld_w0;

		bfa_plog_intarr(plog, mid, event, misc, ints, 4);
	}
}

void
bfa_plog_clear(struct bfa_plog_s *plog)
{
	plog->head = plog->tail = 0;
}

void
bfa_plog_enable(struct bfa_plog_s *plog)
{
	plog->plog_enabled = 1;
}

void
bfa_plog_disable(struct bfa_plog_s *plog)
{
	plog->plog_enabled = 0;
}

bfa_boolean_t
bfa_plog_get_setting(struct bfa_plog_s *plog)
{
	return (bfa_boolean_t)plog->plog_enabled;
}

/*
 *  fcxp_pvt BFA FCXP private functions
 */

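/*
 * Carve the per-fcxp request and response payload buffers out of the
 * module's DMA-able memory block.
 */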
static void
claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
{
	u8	       *dm_kva = NULL;
	u64	dm_pa;
	u32	buf_pool_sz;

	dm_kva = bfa_meminfo_dma_virt(mi);
	dm_pa = bfa_meminfo_dma_phys(mi);

	buf_pool_sz = mod->req_pld_sz * mod->num_fcxps;

	/*
	 * Initialize the fcxp req payload list
	 */
	mod->req_pld_list_kva = dm_kva;
	mod->req_pld_list_pa = dm_pa;
	dm_kva += buf_pool_sz;
	dm_pa += buf_pool_sz;
	memset(mod->req_pld_list_kva, 0, buf_pool_sz);

	/*
	 * Initialize the fcxp rsp payload list
	 */
	buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps;
	mod->rsp_pld_list_kva = dm_kva;
	mod->rsp_pld_list_pa = dm_pa;
	dm_kva += buf_pool_sz;
	dm_pa += buf_pool_sz;
	memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);

	bfa_meminfo_dma_virt(mi) = dm_kva;
	bfa_meminfo_dma_phys(mi) = dm_pa;
}

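/*
 * Claim kernel memory for the fcxp structures and set up the free and
 * active queues.
 */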
static void
claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
{
	u16	i;
	struct bfa_fcxp_s *fcxp;

	fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
	memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);

	INIT_LIST_HEAD(&mod->fcxp_free_q);
	INIT_LIST_HEAD(&mod->fcxp_active_q);

	mod->fcxp_list = fcxp;

	for (i = 0; i < mod->num_fcxps; i++) {
		fcxp->fcxp_mod = mod;
		fcxp->fcxp_tag = i;

		list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
		bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
		fcxp->reqq_waiting = BFA_FALSE;

		fcxp = fcxp + 1;
	}

	bfa_meminfo_kva(mi) = (void *)fcxp;
}

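/*
 * Compute the KVA and DMA memory required by the FCXP module for the
 * configured number of fcxp requests.
 */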
static void
bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
		 u32 *dm_len)
{
	u16	num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs;

	if (num_fcxp_reqs == 0)
		return;

	/*
	 * Account for req/rsp payload
	 */
	*dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
	if (cfg->drvcfg.min_cfg)
		*dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
	else
		*dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs;

	/*
	 * Account for fcxp structs
	 */
	*ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs;
}

static void
bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
	mod->bfa = bfa;
	mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;

	/*
	 * Initialize FCXP request and response payload sizes.
	 */
	mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
	if (!cfg->drvcfg.min_cfg)
		mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;

	INIT_LIST_HEAD(&mod->wait_q);

	claim_fcxp_req_rsp_mem(mod, meminfo);
	claim_fcxps_mem(mod, meminfo);
}

static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}

static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}

static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}

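/*
 * IOC failure notification: fail all active fcxps back to their
 * callers with BFA_STATUS_IOC_FAILURE.
 */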
static void
bfa_fcxp_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s *fcxp;
	struct list_head	      *qe, *qen;

	list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
		fcxp = (struct bfa_fcxp_s *) qe;
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
			bfa_fcxp_free(fcxp);
		} else {
			fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
			bfa_cb_queue(bfa, &fcxp->hcb_qe,
				     __bfa_fcxp_send_cbfn, fcxp);
		}
	}
}

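/*
 * Pull an fcxp off the free queue and move it to the active queue.
 */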
static struct bfa_fcxp_s *
bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
{
	struct bfa_fcxp_s *fcxp;

	bfa_q_deq(&fm->fcxp_free_q, &fcxp);

	if (fcxp)
		list_add_tail(&fcxp->qe, &fm->fcxp_active_q);

	return fcxp;
}

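/*
 * Initialize one direction (request or response) of an fcxp: either
 * mark it to use the internal payload buffer (n_sgles == 0) or record
 * the caller-supplied scatter-gather callbacks.
 */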
static void
bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
	       struct bfa_s *bfa,
	       u8 *use_ibuf,
	       u32 *nr_sgles,
	       bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
	       bfa_fcxp_get_sglen_t *r_sglen_cbfn,
	       struct list_head *r_sgpg_q,
	       int n_sgles,
	       bfa_fcxp_get_sgaddr_t sga_cbfn,
	       bfa_fcxp_get_sglen_t sglen_cbfn)
{

	bfa_assert(bfa != NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	if (n_sgles == 0) {
		*use_ibuf = 1;
	} else {
		bfa_assert(*sga_cbfn != NULL);
		bfa_assert(*sglen_cbfn != NULL);

		*use_ibuf = 0;
		*r_sga_cbfn = sga_cbfn;
		*r_sglen_cbfn = sglen_cbfn;

		*nr_sgles = n_sgles;

		/*
		 * alloc required sgpgs
		 */
		if (n_sgles > BFI_SGE_INLINE)
			bfa_assert(0);
	}

}

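/*
 * Initialize both the request and response sides of a newly allocated
 * fcxp.
 */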
static void
bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
	       void *caller, struct bfa_s *bfa, int nreq_sgles,
	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{

	bfa_assert(bfa != NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	fcxp->caller = caller;

	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
		&fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
		nreq_sgles, req_sga_cbfn, req_sglen_cbfn);

	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
		&fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
		nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);

}

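/*
 * Return an fcxp to the module: hand it to the first waiter on the
 * wait queue, if any, otherwise put it back on the free queue.
 */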
static void
bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
	struct bfa_fcxp_wqe_s *wqe;

	bfa_q_deq(&mod->wait_q, &wqe);
	if (wqe) {
		bfa_trc(mod->bfa, fcxp->fcxp_tag);

		bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
			wqe->nrsp_sgles, wqe->req_sga_cbfn,
			wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
			wqe->rsp_sglen_cbfn);

		wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
		return;
	}

	bfa_assert(bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
	list_del(&fcxp->qe);
	list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
}

static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		   bfa_status_t req_status, u32 rsp_len,
		   u32 resid_len, struct fchs_s *rsp_fchs)
{
	/* discarded fcxp completion */
}

static void
__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcxp_s *fcxp = cbarg;

	if (complete) {
		fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
				fcxp->rsp_status, fcxp->rsp_len,
				fcxp->residue_len, &fcxp->rsp_fchs);
	} else {
		bfa_fcxp_free(fcxp);
	}
}

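/*
 * Firmware completion of an fcxp send request: fix up endianness and
 * residue, log the response and invoke (or queue) the send callback.
 */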
static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s	*fcxp;
	u16		fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);

	bfa_trc(bfa, fcxp_tag);

	fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);

	/*
	 * @todo f/w should not set residue to non-0 when everything
	 *	 is received.
	 */
	if (fcxp_rsp->req_status == BFA_STATUS_OK)
		fcxp_rsp->residue_len = 0;
	else
		fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);

	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

	bfa_assert(fcxp->send_cbfn != NULL);

	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

	if (fcxp->send_cbfn != NULL) {
		bfa_trc(mod->bfa, (NULL == fcxp->caller));
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
			/*
			 * fcxp automatically freed on return from the callback
			 */
			bfa_fcxp_free(fcxp);
		} else {
			fcxp->rsp_status = fcxp_rsp->req_status;
			fcxp->rsp_len = fcxp_rsp->rsp_len;
			fcxp->residue_len = fcxp_rsp->residue_len;
			fcxp->rsp_fchs = fcxp_rsp->fchs;

			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	} else {
		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
	}
}

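/*
 * Set up an inline SGE pair describing a single local buffer.
 */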
static void
hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa)
{
	union bfi_addr_u      sga_zero = { {0} };

	sge->sg_len = reqlen;
	sge->flags = BFI_SGE_DATA_LAST;
	bfa_dma_addr_set(sge[0].sga, req_pa);
	bfa_sge_to_be(sge);
	sge++;

	sge->sga = sga_zero;
	sge->sg_len = reqlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);
}

static void
hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
		 struct fchs_s *fchs)
{
	/*
	 * TODO: TX ox_id
	 */
	if (reqlen > 0) {
		if (fcxp->use_ireqbuf) {
			u32	pld_w0 =
				*((u32 *) BFA_FCXP_REQ_PLD(fcxp));

			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
					BFA_PL_EID_TX,
					reqlen + sizeof(struct fchs_s), fchs,
					pld_w0);
		} else {
			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
					BFA_PL_EID_TX,
					reqlen + sizeof(struct fchs_s),
					fchs);
		}
	} else {
		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
			       reqlen + sizeof(struct fchs_s), fchs);
	}
}

static void
hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
		 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	if (fcxp_rsp->rsp_len > 0) {
		if (fcxp->use_irspbuf) {
			u32	pld_w0 =
				*((u32 *) BFA_FCXP_RSP_PLD(fcxp));

			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
					      BFA_PL_EID_RX,
					      (u16) fcxp_rsp->rsp_len,
					      &fcxp_rsp->fchs, pld_w0);
		} else {
			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
				       BFA_PL_EID_RX,
				       (u16) fcxp_rsp->rsp_len,
				       &fcxp_rsp->fchs);
		}
	} else {
		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
			       (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
	}
}

/*
 * Handler to resume sending fcxp when space is available in the CPE queue.
 */
static void
bfa_fcxp_qresume(void *cbarg)
{
	struct bfa_fcxp_s		*fcxp = cbarg;
	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
	struct bfi_fcxp_send_req_s	*send_req;

	fcxp->reqq_waiting = BFA_FALSE;
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	bfa_fcxp_queue(fcxp, send_req);
}

/*
 * Queue fcxp send request to firmware.
 */
static void
bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
{
	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
	struct bfa_rport_s		*rport = reqi->bfa_rport;

	bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
		    bfa_lpuid(bfa));

	send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
	if (rport) {
		send_req->rport_fw_hndl = rport->fw_handle;
		send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
		if (send_req->max_frmsz == 0)
			send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
	} else {
		send_req->rport_fw_hndl = 0;
		send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
	}

	send_req->vf_id = cpu_to_be16(reqi->vf_id);
	send_req->lp_tag = reqi->lp_tag;
	send_req->class = reqi->class;
	send_req->rsp_timeout = rspi->rsp_timeout;
	send_req->cts = reqi->cts;
	send_req->fchs = reqi->fchs;

	send_req->req_len = cpu_to_be32(reqi->req_tot_len);
	send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);

	/*
	 * setup req sgles
	 */
	if (fcxp->use_ireqbuf == 1) {
		hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len,
					BFA_FCXP_REQ_PLD_PA(fcxp));
	} else {
		if (fcxp->nreq_sgles > 0) {
			bfa_assert(fcxp->nreq_sgles == 1);
			hal_fcxp_set_local_sges(send_req->req_sge,
						reqi->req_tot_len,
						fcxp->req_sga_cbfn(fcxp->caller,
								   0));
		} else {
			bfa_assert(reqi->req_tot_len == 0);
			hal_fcxp_set_local_sges(send_req->req_sge, 0, 0);
		}
	}

	/*
	 * setup rsp sgles
	 */
	if (fcxp->use_irspbuf == 1) {
		bfa_assert(rspi->rsp_maxlen <= BFA_FCXP_MAX_LBUF_SZ);

		hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
					BFA_FCXP_RSP_PLD_PA(fcxp));

	} else {
		if (fcxp->nrsp_sgles > 0) {
			bfa_assert(fcxp->nrsp_sgles == 1);
			hal_fcxp_set_local_sges(send_req->rsp_sge,
						rspi->rsp_maxlen,
						fcxp->rsp_sga_cbfn(fcxp->caller,
								   0));
		} else {
			bfa_assert(rspi->rsp_maxlen == 0);
			hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
		}
	}

	hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);

	bfa_reqq_produce(bfa, BFA_REQQ_FCXP);

	bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
	bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
}


/*
 *  hal_fcxp_api BFA FCXP API
 */

/*
 * Allocate an FCXP instance to send a response or to send a request
 * that has a response. Request/response buffers are allocated by caller.
 *
 * @param[in]	bfa		BFA bfa instance
 * @param[in]	nreq_sgles	Number of SG elements required for request
 *				buffer. 0, if fcxp internal buffers are	used.
 *				Use bfa_fcxp_get_reqbuf() to get the
 *				internal req buffer.
 * @param[in]	req_sgles	SG elements describing request buffer. Will be
 *				copied in by BFA and hence can be freed on
 *				return from this function.
 * @param[in]	get_req_sga	function ptr to be called to get a request SG
 *				Address (given the sge index).
 * @param[in]	get_req_sglen	function ptr to be called to get a request SG
 *				len (given the sge index).
 * @param[in]	get_rsp_sga	function ptr to be called to get a response SG
 *				Address (given the sge index).
 * @param[in]	get_rsp_sglen	function ptr to be called to get a response SG
 *				len (given the sge index).
 *
 * @return FCXP instance. NULL on failure.
 */
struct bfa_fcxp_s *
bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{
	struct bfa_fcxp_s *fcxp = NULL;

	bfa_assert(bfa != NULL);

	fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
	if (fcxp == NULL)
		return NULL;

	bfa_trc(bfa, fcxp->fcxp_tag);

	bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
			req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);

	return fcxp;
}
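
/*
 * Typical usage -- a minimal sketch, assuming the caller uses the
 * internal buffers (nreq_sgles and nrsp_sgles both zero) and supplies
 * its own completion handler to bfa_fcxp_send():
 *
 *	fcxp = bfa_fcxp_alloc(caller, bfa, 0, 0, NULL, NULL, NULL, NULL);
 *	if (fcxp == NULL)
 *		return;	(or queue for one via bfa_fcxp_alloc_wait())
 *	reqbuf = bfa_fcxp_get_reqbuf(fcxp);
 *	(build the request payload in reqbuf, then call bfa_fcxp_send())
 */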

/*
 * Get the internal request buffer pointer
 *
 * @param[in]	fcxp	BFA fcxp pointer
 *
 * @return		pointer to the internal request buffer
 */
void *
bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
	void	*reqbuf;

	bfa_assert(fcxp->use_ireqbuf == 1);
	reqbuf = ((u8 *)mod->req_pld_list_kva) +
		fcxp->fcxp_tag * mod->req_pld_sz;
	return reqbuf;
}

u32
bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;

	return mod->req_pld_sz;
}

/*
 * Get the internal response buffer pointer
 *
 * @param[in]	fcxp	BFA fcxp pointer
 *
 * @return		pointer to the internal response buffer
 */
void *
bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
	void	*rspbuf;

	bfa_assert(fcxp->use_irspbuf == 1);

	rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
		fcxp->fcxp_tag * mod->rsp_pld_sz;
	return rspbuf;
}

/*
 *		Free the BFA FCXP
 *
 * @param[in]	fcxp			BFA fcxp pointer
 *
 * @return		void
 */
void
bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;

	bfa_assert(fcxp != NULL);
	bfa_trc(mod->bfa, fcxp->fcxp_tag);
	bfa_fcxp_put(fcxp);
}

/*
 * Send a FCXP request
 *
 * @param[in]	fcxp	BFA fcxp pointer
 * @param[in]	rport	BFA rport pointer. Could be left NULL for WKA rports
 * @param[in]	vf_id	virtual Fabric ID
 * @param[in]	lp_tag	lport tag
 * @param[in]	cts	use Continuous sequence
 * @param[in]	cos	fc Class of Service
 * @param[in]	reqlen	request length, does not include FCHS length
 * @param[in]	fchs	fc Header Pointer. The header content will be copied
 *			in by BFA.
 *
 * @param[in]	cbfn	call back function to be called on receiving
 *								the response
 * @param[in]	cbarg	arg for cbfn
 * @param[in]	rsp_maxlen
 *			maximum expected response length
 * @param[in]	rsp_timeout
 *			response timeout
 *
 * @return		void
 */
void
bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
	      u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
	      u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
	      void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
{
	struct bfa_s			*bfa  = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
	struct bfi_fcxp_send_req_s	*send_req;

	bfa_trc(bfa, fcxp->fcxp_tag);

	/*
	 * setup request/response info
	 */
	reqi->bfa_rport = rport;
	reqi->vf_id = vf_id;
	reqi->lp_tag = lp_tag;
	reqi->class = cos;
	rspi->rsp_timeout = rsp_timeout;
	reqi->cts = cts;
	reqi->fchs = *fchs;
	reqi->req_tot_len = reqlen;
	rspi->rsp_maxlen = rsp_maxlen;
	fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
	fcxp->send_cbarg = cbarg;

	/*
	 * If no room in CPE queue, wait for space in request queue
	 */
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	if (!send_req) {
		bfa_trc(bfa, fcxp->fcxp_tag);
		fcxp->reqq_waiting = BFA_TRUE;
		bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
		return;
	}

	bfa_fcxp_queue(fcxp, send_req);
}

/*
 * Abort a BFA FCXP
 *
 * @param[in]	fcxp	BFA fcxp pointer
 *
 * @return		bfa_status_t
 */
bfa_status_t
bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
{
	bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
	bfa_assert(0);
	return BFA_STATUS_OK;
}

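/*
 * Wait for an fcxp to become available: record the caller's allocation
 * parameters in a wait queue element and queue it on the module's
 * wait queue.
 */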
void
bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
	       bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
	       void *caller, int nreq_sgles,
	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	bfa_assert(list_empty(&mod->fcxp_free_q));

	wqe->alloc_cbfn = alloc_cbfn;
	wqe->alloc_cbarg = alloc_cbarg;
	wqe->caller = caller;
	wqe->bfa = bfa;
	wqe->nreq_sgles = nreq_sgles;
	wqe->nrsp_sgles = nrsp_sgles;
	wqe->req_sga_cbfn = req_sga_cbfn;
	wqe->req_sglen_cbfn = req_sglen_cbfn;
	wqe->rsp_sga_cbfn = rsp_sga_cbfn;
	wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;

	list_add_tail(&wqe->qe, &mod->wait_q);
}

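/*
 * Cancel a pending fcxp allocation wait.
 */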
void
bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	bfa_assert(bfa_q_is_on_q(&mod->wait_q, wqe));
	list_del(&wqe->qe);
}

void
bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
{
	/*
	 * If waiting for room in request queue, cancel reqq wait
	 * and free fcxp.
	 */
	if (fcxp->reqq_waiting) {
		fcxp->reqq_waiting = BFA_FALSE;
		bfa_reqq_wcancel(&fcxp->reqq_wqe);
		bfa_fcxp_free(fcxp);
		return;
	}

	fcxp->send_cbfn = bfa_fcxp_null_comp;
}



/*
 *  hal_fcxp_public BFA FCXP public functions
 */

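/*
 * FCXP firmware message class handler.
 */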
void
bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	switch (msg->mhdr.msg_id) {
	case BFI_FCXP_I2H_SEND_RSP:
		hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
		break;

	default:
		bfa_trc(bfa, msg->mhdr.msg_id);
		bfa_assert(0);
	}
}

u32
bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	return mod->rsp_pld_sz;
}


/*
 *  BFA LPS state machine functions
 */

/*
 * Init state -- no login
 */
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGIN:
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_login);
			bfa_lps_send_login(lps);
		}

		if (lps->fdisc)
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FDISC Request");
		else
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FLOGI Request");
		break;

	case BFA_LPS_SM_LOGOUT:
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		bfa_lps_free(lps);
		break;

	case BFA_LPS_SM_RX_CVL:
	case BFA_LPS_SM_OFFLINE:
		break;

	case BFA_LPS_SM_FWRSP:
		/*
		 * Could happen when the fabric detects loopback and discards
		 * the lps request. Firmware will eventually send out a
		 * timeout. Just ignore it.
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * login is in progress -- awaiting response from firmware
 */
static void
bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		if (lps->status == BFA_STATUS_OK) {
			bfa_sm_set_state(lps, bfa_lps_sm_online);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FDISC Accept");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_init);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FDISC Fail (RJT or timeout)");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FLOGI Fail (RJT or timeout)");
		}
		bfa_lps_login_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * login pending - awaiting space in request queue
 */
static void
bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		bfa_sm_set_state(lps, bfa_lps_sm_login);
		break;

	case BFA_LPS_SM_OFFLINE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	case BFA_LPS_SM_RX_CVL:
		/*
		 * Login was not even sent out; so when getting out
		 * of this state, it will appear like a login retry
		 * after Clear virtual link
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * login complete
 */
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGOUT:
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_logout);
			bfa_lps_send_logout(lps);
		}
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		bfa_sm_set_state(lps, bfa_lps_sm_init);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * logout in progress - awaiting firmware response
 */
static void
bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * logout pending -- awaiting space in request queue
 */
static void
bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		bfa_sm_set_state(lps, bfa_lps_sm_logout);
		bfa_lps_send_logout(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}



/*
 *  lps_pvt BFA LPS private functions
 */

/*
 * return memory requirement
 */
static void
bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
	u32 *dm_len)
{
	if (cfg->drvcfg.min_cfg)
		*ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS;
	else
		*ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
}

/*
 * bfa module attach at initialization time
 */
static void
bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;
	int			i;

	memset(mod, 0, sizeof(struct bfa_lps_mod_s));
	mod->num_lps = BFA_LPS_MAX_LPORTS;
	if (cfg->drvcfg.min_cfg)
		mod->num_lps = BFA_LPS_MIN_LPORTS;
	else
		mod->num_lps = BFA_LPS_MAX_LPORTS;
	mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo);

	bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s);

	INIT_LIST_HEAD(&mod->lps_free_q);
	INIT_LIST_HEAD(&mod->lps_active_q);

	for (i = 0; i < mod->num_lps; i++, lps++) {
		lps->bfa	= bfa;
		lps->lp_tag	= (u8) i;
		lps->reqq	= BFA_REQQ_LPS;
		bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
		list_add_tail(&lps->qe, &mod->lps_free_q);
	}
}

static void
bfa_lps_detach(struct bfa_s *bfa)
{
}

static void
bfa_lps_start(struct bfa_s *bfa)
{
}

static void
bfa_lps_stop(struct bfa_s *bfa)
{
}

/*
 * IOC in disabled state -- consider all lps offline
 */
static void
bfa_lps_iocdisable(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;
	struct list_head		*qe, *qen;

	list_for_each_safe(qe, qen, &mod->lps_active_q) {
		lps = (struct bfa_lps_s *) qe;
		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
	}
}

/*
 * Firmware login response
 */
static void
bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	bfa_assert(rsp->lp_tag < mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);

	lps->status = rsp->status;
	switch (rsp->status) {
	case BFA_STATUS_OK:
		lps->fport	= rsp->f_port;
		lps->npiv_en	= rsp->npiv_en;
		lps->lp_pid	= rsp->lp_pid;
		lps->pr_bbcred	= be16_to_cpu(rsp->bb_credit);
		lps->pr_pwwn	= rsp->port_name;
		lps->pr_nwwn	= rsp->node_name;
		lps->auth_req	= rsp->auth_req;
		lps->lp_mac	= rsp->lp_mac;
		lps->brcd_switch = rsp->brcd_switch;
		lps->fcf_mac	= rsp->fcf_mac;

		break;

	case BFA_STATUS_FABRIC_RJT:
		lps->lsrjt_rsn = rsp->lsrjt_rsn;
		lps->lsrjt_expl = rsp->lsrjt_expl;

		break;

	case BFA_STATUS_EPROTOCOL:
		lps->ext_status = rsp->ext_status;

		break;

	default:
		/* Nothing to do with other status */
		break;
	}

	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}

/*
 * Firmware logout response
 */
static void
bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	bfa_assert(rsp->lp_tag < mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);

	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}

/*
 * Firmware received a Clear virtual link request (for FCoE)
 */
static void
bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);

	bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
}

/*
 * Space is available in request queue, resume queueing request to firmware.
 */
static void
bfa_lps_reqq_resume(void *lps_arg)
{
	struct bfa_lps_s	*lps = lps_arg;

	bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
}

/*
 * lps is freed -- triggered by vport delete
 */
static void
bfa_lps_free(struct bfa_lps_s *lps)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(lps->bfa);

	lps->lp_pid = 0;
	list_del(&lps->qe);
	list_add_tail(&lps->qe, &mod->lps_free_q);
}

/*
 * send login request to firmware
 */
static void
bfa_lps_send_login(struct bfa_lps_s *lps)
{
	struct bfi_lps_login_req_s	*m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	bfa_assert(m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
		bfa_lpuid(lps->bfa));

	m->lp_tag	= lps->lp_tag;
	m->alpa		= lps->alpa;
	m->pdu_size	= cpu_to_be16(lps->pdusz);
	m->pwwn		= lps->pwwn;
	m->nwwn		= lps->nwwn;
	m->fdisc	= lps->fdisc;
	m->auth_en	= lps->auth_en;

	bfa_reqq_produce(lps->bfa, lps->reqq);
}

/*
 * send logout request to firmware
 */
static void
bfa_lps_send_logout(struct bfa_lps_s *lps)
{
	struct bfi_lps_logout_req_s *m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	bfa_assert(m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
		bfa_lpuid(lps->bfa));

	m->lp_tag    = lps->lp_tag;
	m->port_name = lps->pwwn;
	bfa_reqq_produce(lps->bfa, lps->reqq);
}

/*
 * Indirect login completion handler for non-fcs
 */
static void
bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
{
	struct bfa_lps_s *lps	= arg;

	if (!complete)
		return;

	if (lps->fdisc)
		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
	else
		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
}

/*
 * Login completion handler -- direct call for fcs, queue for others
 */
static void
bfa_lps_login_comp(struct bfa_lps_s *lps)
{
	if (!lps->bfa->fcs) {
		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
			lps);
		return;
	}

	if (lps->fdisc)
		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
	else
		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
}

/*
 * Indirect logout completion handler for non-fcs
 */
static void
bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
{
	struct bfa_lps_s *lps	= arg;

	if (!complete)
		return;

	if (lps->fdisc)
		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
}

/*
 * Logout completion handler -- direct call for fcs, queue for others
 */
static void
bfa_lps_logout_comp(struct bfa_lps_s *lps)
{
	if (!lps->bfa->fcs) {
		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
			lps);
		return;
	}
	if (lps->fdisc)
		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
}

/*
 * Clear virtual link completion handler for non-fcs
 */
static void
bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
{
	struct bfa_lps_s *lps	= arg;

	if (!complete)
		return;

	/* Clear virtual link to base port will result in link down */
	if (lps->fdisc)
		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
}

/*
 * Received Clear virtual link event -- direct call for fcs,
 * queue for others
 */
static void
bfa_lps_cvl_event(struct bfa_lps_s *lps)
{
	if (!lps->bfa->fcs) {
		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
			lps);
		return;
	}

	/* Clear virtual link to base port will result in link down */
	if (lps->fdisc)
		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
}



/*
 *  lps_public BFA LPS public functions
 */

u32
bfa_lps_get_max_vport(struct bfa_s *bfa)
{
	if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
		return BFA_LPS_MAX_VPORTS_SUPP_CT;
	else
		return BFA_LPS_MAX_VPORTS_SUPP_CB;
}

/*
 * Allocate a lport service tag.
 */
struct bfa_lps_s  *
bfa_lps_alloc(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps = NULL;

	bfa_q_deq(&mod->lps_free_q, &lps);

	if (lps == NULL)
		return NULL;

	list_add_tail(&lps->qe, &mod->lps_active_q);

	bfa_sm_set_state(lps, bfa_lps_sm_init);
	return lps;
}

/*
 * Free lport service tag. This can be called anytime after an alloc.
 * No need to wait for any pending login/logout completions.
 */
void
bfa_lps_delete(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
}

/*
 * Initiate a lport login.
 */
void
bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
	wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
{
	lps->uarg	= uarg;
	lps->alpa	= alpa;
	lps->pdusz	= pdusz;
	lps->pwwn	= pwwn;
	lps->nwwn	= nwwn;
	lps->fdisc	= BFA_FALSE;
	lps->auth_en	= auth_en;
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}

/*
 * Initiate a lport fdisc login.
 */
void
bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
	wwn_t nwwn)
{
	lps->uarg	= uarg;
	lps->alpa	= 0;
	lps->pdusz	= pdusz;
	lps->pwwn	= pwwn;
	lps->nwwn	= nwwn;
	lps->fdisc	= BFA_TRUE;
	lps->auth_en	= BFA_FALSE;
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}

/*
 * Initiate a lport logout (following a FLOGI login).
 */
void
bfa_lps_flogo(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}

/*
 * Initiate a lport FDISC logout.
 */
void
bfa_lps_fdisclogo(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}

/*
 * Discard a pending login request -- should be called only for
 * link down handling.
 */
void
bfa_lps_discard(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
}

/*
 * Return lport services tag
 */
u8
bfa_lps_get_tag(struct bfa_lps_s *lps)
{
	return lps->lp_tag;
}

/*
 * Return lport services tag given the pid
 */
u8
bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;
	int			i;

	for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
		if (lps->lp_pid == pid)
			return lps->lp_tag;
	}

	/* Return base port tag anyway */
	return 0;
}

/*
 * return if fabric login indicates support for NPIV
 */
bfa_boolean_t
bfa_lps_is_npiv_en(struct bfa_lps_s *lps)
{
	return lps->npiv_en;
}

/*
 * Return TRUE if attached to F-Port, else return FALSE
 */
bfa_boolean_t
bfa_lps_is_fport(struct bfa_lps_s *lps)
{
	return lps->fport;
}

/*
 * Return TRUE if attached to a Brocade Fabric
 */
bfa_boolean_t
bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps)
{
	return lps->brcd_switch;
}
/*
 * return TRUE if authentication is required
 */
bfa_boolean_t
bfa_lps_is_authreq(struct bfa_lps_s *lps)
{
	return lps->auth_req;
}

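/*
 * Return extended status when login fails with a protocol error
 * (BFA_STATUS_EPROTOCOL)
 */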
1947 bfa_eproto_status_t
1948 bfa_lps_get_extstatus(struct bfa_lps_s *lps)
1949 {
1950 	return lps->ext_status;
1951 }
1952 
1953 /*
1954  * return port id assigned to the lport
1955  */
1956 u32
1957 bfa_lps_get_pid(struct bfa_lps_s *lps)
1958 {
1959 	return lps->lp_pid;
1960 }
1961 
1962 /*
1963  * return port id assigned to the base lport
1964  */
1965 u32
1966 bfa_lps_get_base_pid(struct bfa_s *bfa)
1967 {
1968 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1969 
1970 	return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
1971 }
1972 
1973 /*
1974  * Return bb_credit assigned in FLOGI response
1975  */
1976 u16
1977 bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps)
1978 {
1979 	return lps->pr_bbcred;
1980 }
1981 
1982 /*
1983  * Return peer port name
1984  */
1985 wwn_t
1986 bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps)
1987 {
1988 	return lps->pr_pwwn;
1989 }
1990 
1991 /*
1992  * Return peer node name
1993  */
1994 wwn_t
1995 bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps)
1996 {
1997 	return lps->pr_nwwn;
1998 }
1999 
2000 /*
2001  * return reason code if login request is rejected
2002  */
2003 u8
2004 bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps)
2005 {
2006 	return lps->lsrjt_rsn;
2007 }
2008 
2009 /*
2010  * return explanation code if login request is rejected
2011  */
2012 u8
2013 bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps)
2014 {
2015 	return lps->lsrjt_expl;
2016 }
2017 
2018 /*
2019  * Return fpma/spma MAC for lport
2020  */
2021 mac_t
2022 bfa_lps_get_lp_mac(struct bfa_lps_s *lps)
2023 {
2024 	return lps->lp_mac;
2025 }
2026 
2027 /*
2028  * LPS firmware message class handler.
2029  */
2030 void
2031 bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2032 {
2033 	union bfi_lps_i2h_msg_u	msg;
2034 
2035 	bfa_trc(bfa, m->mhdr.msg_id);
2036 	msg.msg = m;
2037 
2038 	switch (m->mhdr.msg_id) {
2039 	case BFI_LPS_H2I_LOGIN_RSP:
2040 		bfa_lps_login_rsp(bfa, msg.login_rsp);
2041 		break;
2042 
2043 	case BFI_LPS_H2I_LOGOUT_RSP:
2044 		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
2045 		break;
2046 
2047 	case BFI_LPS_H2I_CVL_EVENT:
2048 		bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
2049 		break;
2050 
2051 	default:
2052 		bfa_trc(bfa, m->mhdr.msg_id);
2053 		bfa_assert(0);
2054 	}
2055 }
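
/*
 * A standalone sketch of the union-demux idiom used above: the generic
 * mailbox pointer is stored once through the union's .msg member, and
 * each switch arm reads it back through the typed view it needs, with no
 * cast per case (member names below are illustrative):
 *
 *	union i2h_u {
 *		struct bfi_msg_s	*msg;
 *		struct login_rsp_s	*login_rsp;
 *	} u;
 *
 *	u.msg = m;
 *	handle_login_rsp(u.login_rsp);
 */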
2056 
2057 /*
2058  * FC PORT state machine functions
2059  */
2060 static void
2061 bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
2062 			enum bfa_fcport_sm_event event)
2063 {
2064 	bfa_trc(fcport->bfa, event);
2065 
2066 	switch (event) {
2067 	case BFA_FCPORT_SM_START:
2068 		/*
2069 		 * Start event after IOC is configured and BFA is started.
2070 		 */
2071 		if (bfa_fcport_send_enable(fcport)) {
2072 			bfa_trc(fcport->bfa, BFA_TRUE);
2073 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2074 		} else {
2075 			bfa_trc(fcport->bfa, BFA_FALSE);
2076 			bfa_sm_set_state(fcport,
2077 					bfa_fcport_sm_enabling_qwait);
2078 		}
2079 		break;
2080 
2081 	case BFA_FCPORT_SM_ENABLE:
2082 		/*
2083 		 * Port is persistently configured to be in enabled state. Do
2084 		 * not change state. Port enabling is done when START event is
2085 		 * received.
2086 		 */
2087 		break;
2088 
2089 	case BFA_FCPORT_SM_DISABLE:
2090 		/*
2091 		 * If a port is persistently configured to be disabled, the
2092 		 * first event will be a port disable request.
2093 		 */
2094 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2095 		break;
2096 
2097 	case BFA_FCPORT_SM_HWFAIL:
2098 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2099 		break;
2100 
2101 	default:
2102 		bfa_sm_fault(fcport->bfa, event);
2103 	}
2104 }
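
/*
 * Each bfa_fcport_sm_* function above and below is one state of a
 * function-pointer state machine: bfa_sm_set_state() stores the state
 * function and bfa_sm_send_event() calls whatever is stored. A minimal
 * sketch, assuming the bfa_cs.h macros are thin wrappers along these
 * lines:
 *
 *	#define bfa_sm_set_state(_sm, _state)	((_sm)->sm = (bfa_sm_t)(_state))
 *	#define bfa_sm_send_event(_sm, _event)	((_sm)->sm((_sm), (_event)))
 *
 * so a transition is a single pointer store, and event dispatch is one
 * indirect call with no central transition table.
 */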
2105 
2106 static void
2107 bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
2108 				enum bfa_fcport_sm_event event)
2109 {
2110 	char pwwn_buf[BFA_STRING_32];
2111 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2112 	bfa_trc(fcport->bfa, event);
2113 
2114 	switch (event) {
2115 	case BFA_FCPORT_SM_QRESUME:
2116 		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2117 		bfa_fcport_send_enable(fcport);
2118 		break;
2119 
2120 	case BFA_FCPORT_SM_STOP:
2121 		bfa_reqq_wcancel(&fcport->reqq_wait);
2122 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2123 		break;
2124 
2125 	case BFA_FCPORT_SM_ENABLE:
2126 		/*
2127 		 * Enable is already in progress.
2128 		 */
2129 		break;
2130 
2131 	case BFA_FCPORT_SM_DISABLE:
2132 		/*
2133 		 * The enable request has not been sent to firmware yet, so
2134 		 * cancel the queue wait and transition straight to disabled.
2135 		 */
2136 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2137 		bfa_reqq_wcancel(&fcport->reqq_wait);
2138 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2139 				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2140 		wwn2str(pwwn_buf, fcport->pwwn);
2141 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2142 			"Base port disabled: WWN = %s\n", pwwn_buf);
2143 		break;
2144 
2145 	case BFA_FCPORT_SM_LINKUP:
2146 	case BFA_FCPORT_SM_LINKDOWN:
2147 		/*
2148 		 * Possible to get link events when doing back-to-back
2149 		 * enable/disables.
2150 		 */
2151 		break;
2152 
2153 	case BFA_FCPORT_SM_HWFAIL:
2154 		bfa_reqq_wcancel(&fcport->reqq_wait);
2155 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2156 		break;
2157 
2158 	default:
2159 		bfa_sm_fault(fcport->bfa, event);
2160 	}
2161 }
2162 
2163 static void
2164 bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
2165 						enum bfa_fcport_sm_event event)
2166 {
2167 	char pwwn_buf[BFA_STRING_32];
2168 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2169 	bfa_trc(fcport->bfa, event);
2170 
2171 	switch (event) {
2172 	case BFA_FCPORT_SM_FWRSP:
2173 	case BFA_FCPORT_SM_LINKDOWN:
2174 		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
2175 		break;
2176 
2177 	case BFA_FCPORT_SM_LINKUP:
2178 		bfa_fcport_update_linkinfo(fcport);
2179 		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
2180 
2181 		bfa_assert(fcport->event_cbfn);
2182 		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
2183 		break;
2184 
2185 	case BFA_FCPORT_SM_ENABLE:
2186 		/*
2187 		 * Already being enabled.
2188 		 */
2189 		break;
2190 
2191 	case BFA_FCPORT_SM_DISABLE:
2192 		if (bfa_fcport_send_disable(fcport))
2193 			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2194 		else
2195 			bfa_sm_set_state(fcport,
2196 					 bfa_fcport_sm_disabling_qwait);
2197 
2198 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2199 				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2200 		wwn2str(pwwn_buf, fcport->pwwn);
2201 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2202 			"Base port disabled: WWN = %s\n", pwwn_buf);
2203 		break;
2204 
2205 	case BFA_FCPORT_SM_STOP:
2206 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2207 		break;
2208 
2209 	case BFA_FCPORT_SM_HWFAIL:
2210 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2211 		break;
2212 
2213 	default:
2214 		bfa_sm_fault(fcport->bfa, event);
2215 	}
2216 }
2217 
2218 static void
2219 bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
2220 						enum bfa_fcport_sm_event event)
2221 {
2222 	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
2223 	char pwwn_buf[BFA_STRING_32];
2224 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2225 
2226 	bfa_trc(fcport->bfa, event);
2227 
2228 	switch (event) {
2229 	case BFA_FCPORT_SM_LINKUP:
2230 		bfa_fcport_update_linkinfo(fcport);
2231 		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
2232 		bfa_assert(fcport->event_cbfn);
2233 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2234 				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
2235 		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
2237 			bfa_trc(fcport->bfa,
2238 				pevent->link_state.vc_fcf.fcf.fipenabled);
2239 			bfa_trc(fcport->bfa,
2240 				pevent->link_state.vc_fcf.fcf.fipfailed);
2241 
2242 			if (pevent->link_state.vc_fcf.fcf.fipfailed)
2243 				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2244 					BFA_PL_EID_FIP_FCF_DISC, 0,
2245 					"FIP FCF Discovery Failed");
2246 			else
2247 				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2248 					BFA_PL_EID_FIP_FCF_DISC, 0,
2249 					"FIP FCF Discovered");
2250 		}
2251 
2252 		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
2253 		wwn2str(pwwn_buf, fcport->pwwn);
2254 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2255 			"Base port online: WWN = %s\n", pwwn_buf);
2256 		break;
2257 
2258 	case BFA_FCPORT_SM_LINKDOWN:
2259 		/*
2260 		 * Possible to get link down event.
2261 		 * A repeated link down event is possible; ignore it.
2262 		break;
2263 
2264 	case BFA_FCPORT_SM_ENABLE:
2265 		/*
2266 		 * Already enabled.
2267 		 */
2268 		break;
2269 
2270 	case BFA_FCPORT_SM_DISABLE:
2271 		if (bfa_fcport_send_disable(fcport))
2272 			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2273 		else
2274 			bfa_sm_set_state(fcport,
2275 					 bfa_fcport_sm_disabling_qwait);
2276 
2277 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2278 				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2279 		wwn2str(pwwn_buf, fcport->pwwn);
2280 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2281 			"Base port disabled: WWN = %s\n", pwwn_buf);
2282 		break;
2283 
2284 	case BFA_FCPORT_SM_STOP:
2285 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2286 		break;
2287 
2288 	case BFA_FCPORT_SM_HWFAIL:
2289 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2290 		break;
2291 
2292 	default:
2293 		bfa_sm_fault(fcport->bfa, event);
2294 	}
2295 }
2296 
2297 static void
2298 bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
2299 	enum bfa_fcport_sm_event event)
2300 {
2301 	char pwwn_buf[BFA_STRING_32];
2302 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2303 
2304 	bfa_trc(fcport->bfa, event);
2305 
2306 	switch (event) {
2307 	case BFA_FCPORT_SM_ENABLE:
2308 		/*
2309 		 * Already enabled.
2310 		 */
2311 		break;
2312 
2313 	case BFA_FCPORT_SM_DISABLE:
2314 		if (bfa_fcport_send_disable(fcport))
2315 			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2316 		else
2317 			bfa_sm_set_state(fcport,
2318 					 bfa_fcport_sm_disabling_qwait);
2319 
2320 		bfa_fcport_reset_linkinfo(fcport);
2321 		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2322 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2323 				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2324 		wwn2str(pwwn_buf, fcport->pwwn);
2325 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2326 			"Base port offline: WWN = %s\n", pwwn_buf);
2327 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2328 			"Base port disabled: WWN = %s\n", pwwn_buf);
2329 		break;
2330 
2331 	case BFA_FCPORT_SM_LINKDOWN:
2332 		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
2333 		bfa_fcport_reset_linkinfo(fcport);
2334 		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2335 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2336 				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
2337 		wwn2str(pwwn_buf, fcport->pwwn);
2338 		if (BFA_PORT_IS_DISABLED(fcport->bfa))
2339 			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2340 				"Base port offline: WWN = %s\n", pwwn_buf);
2341 		else
2342 			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2343 				"Base port (WWN = %s) "
2344 				"lost fabric connectivity\n", pwwn_buf);
2345 		break;
2346 
2347 	case BFA_FCPORT_SM_STOP:
2348 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2349 		bfa_fcport_reset_linkinfo(fcport);
2350 		wwn2str(pwwn_buf, fcport->pwwn);
2351 		if (BFA_PORT_IS_DISABLED(fcport->bfa))
2352 			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2353 				"Base port offline: WWN = %s\n", pwwn_buf);
2354 		else
2355 			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2356 				"Base port (WWN = %s) "
2357 				"lost fabric connectivity\n", pwwn_buf);
2358 		break;
2359 
2360 	case BFA_FCPORT_SM_HWFAIL:
2361 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2362 		bfa_fcport_reset_linkinfo(fcport);
2363 		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2364 		wwn2str(pwwn_buf, fcport->pwwn);
2365 		if (BFA_PORT_IS_DISABLED(fcport->bfa))
2366 			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2367 				"Base port offline: WWN = %s\n", pwwn_buf);
2368 		else
2369 			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2370 				"Base port (WWN = %s) "
2371 				"lost fabric connectivity\n", pwwn_buf);
2372 		break;
2373 
2374 	default:
2375 		bfa_sm_fault(fcport->bfa, event);
2376 	}
2377 }
2378 
2379 static void
2380 bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
2381 				 enum bfa_fcport_sm_event event)
2382 {
2383 	bfa_trc(fcport->bfa, event);
2384 
2385 	switch (event) {
2386 	case BFA_FCPORT_SM_QRESUME:
2387 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2388 		bfa_fcport_send_disable(fcport);
2389 		break;
2390 
2391 	case BFA_FCPORT_SM_STOP:
2392 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2393 		bfa_reqq_wcancel(&fcport->reqq_wait);
2394 		break;
2395 
2396 	case BFA_FCPORT_SM_ENABLE:
2397 		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
2398 		break;
2399 
2400 	case BFA_FCPORT_SM_DISABLE:
2401 		/*
2402 		 * Already being disabled.
2403 		 */
2404 		break;
2405 
2406 	case BFA_FCPORT_SM_LINKUP:
2407 	case BFA_FCPORT_SM_LINKDOWN:
2408 		/*
2409 		 * Possible to get link events when doing back-to-back
2410 		 * enable/disables.
2411 		 */
2412 		break;
2413 
2414 	case BFA_FCPORT_SM_HWFAIL:
2415 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2416 		bfa_reqq_wcancel(&fcport->reqq_wait);
2417 		break;
2418 
2419 	default:
2420 		bfa_sm_fault(fcport->bfa, event);
2421 	}
2422 }
2423 
2424 static void
2425 bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
2426 				 enum bfa_fcport_sm_event event)
2427 {
2428 	bfa_trc(fcport->bfa, event);
2429 
2430 	switch (event) {
2431 	case BFA_FCPORT_SM_QRESUME:
2432 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2433 		bfa_fcport_send_disable(fcport);
2434 		if (bfa_fcport_send_enable(fcport))
2435 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2436 		else
2437 			bfa_sm_set_state(fcport,
2438 					 bfa_fcport_sm_enabling_qwait);
2439 		break;
2440 
2441 	case BFA_FCPORT_SM_STOP:
2442 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2443 		bfa_reqq_wcancel(&fcport->reqq_wait);
2444 		break;
2445 
2446 	case BFA_FCPORT_SM_ENABLE:
2447 		break;
2448 
2449 	case BFA_FCPORT_SM_DISABLE:
2450 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
2451 		break;
2452 
2453 	case BFA_FCPORT_SM_LINKUP:
2454 	case BFA_FCPORT_SM_LINKDOWN:
2455 		/*
2456 		 * Possible to get link events when doing back-to-back
2457 		 * enable/disables.
2458 		 */
2459 		break;
2460 
2461 	case BFA_FCPORT_SM_HWFAIL:
2462 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2463 		bfa_reqq_wcancel(&fcport->reqq_wait);
2464 		break;
2465 
2466 	default:
2467 		bfa_sm_fault(fcport->bfa, event);
2468 	}
2469 }
2470 
2471 static void
2472 bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
2473 						enum bfa_fcport_sm_event event)
2474 {
2475 	char pwwn_buf[BFA_STRING_32];
2476 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2477 	bfa_trc(fcport->bfa, event);
2478 
2479 	switch (event) {
2480 	case BFA_FCPORT_SM_FWRSP:
2481 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2482 		break;
2483 
2484 	case BFA_FCPORT_SM_DISABLE:
2485 		/*
2486 		 * Already being disabled.
2487 		 */
2488 		break;
2489 
2490 	case BFA_FCPORT_SM_ENABLE:
2491 		if (bfa_fcport_send_enable(fcport))
2492 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2493 		else
2494 			bfa_sm_set_state(fcport,
2495 					 bfa_fcport_sm_enabling_qwait);
2496 
2497 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2498 				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
2499 		wwn2str(pwwn_buf, fcport->pwwn);
2500 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2501 			"Base port enabled: WWN = %s\n", pwwn_buf);
2502 		break;
2503 
2504 	case BFA_FCPORT_SM_STOP:
2505 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2506 		break;
2507 
2508 	case BFA_FCPORT_SM_LINKUP:
2509 	case BFA_FCPORT_SM_LINKDOWN:
2510 		/*
2511 		 * Possible to get link events when doing back-to-back
2512 		 * enable/disables.
2513 		 */
2514 		break;
2515 
2516 	case BFA_FCPORT_SM_HWFAIL:
2517 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2518 		break;
2519 
2520 	default:
2521 		bfa_sm_fault(fcport->bfa, event);
2522 	}
2523 }
2524 
2525 static void
2526 bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
2527 						enum bfa_fcport_sm_event event)
2528 {
2529 	char pwwn_buf[BFA_STRING_32];
2530 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2531 	bfa_trc(fcport->bfa, event);
2532 
2533 	switch (event) {
2534 	case BFA_FCPORT_SM_START:
2535 		/*
2536 		 * Ignore start event for a port that is disabled.
2537 		 */
2538 		break;
2539 
2540 	case BFA_FCPORT_SM_STOP:
2541 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2542 		break;
2543 
2544 	case BFA_FCPORT_SM_ENABLE:
2545 		if (bfa_fcport_send_enable(fcport))
2546 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2547 		else
2548 			bfa_sm_set_state(fcport,
2549 					 bfa_fcport_sm_enabling_qwait);
2550 
2551 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2552 				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
2553 		wwn2str(pwwn_buf, fcport->pwwn);
2554 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2555 			"Base port enabled: WWN = %s\n", pwwn_buf);
2556 		break;
2557 
2558 	case BFA_FCPORT_SM_DISABLE:
2559 		/*
2560 		 * Already disabled.
2561 		 */
2562 		break;
2563 
2564 	case BFA_FCPORT_SM_HWFAIL:
2565 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2566 		break;
2567 
2568 	default:
2569 		bfa_sm_fault(fcport->bfa, event);
2570 	}
2571 }
2572 
2573 static void
2574 bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
2575 			 enum bfa_fcport_sm_event event)
2576 {
2577 	bfa_trc(fcport->bfa, event);
2578 
2579 	switch (event) {
2580 	case BFA_FCPORT_SM_START:
2581 		if (bfa_fcport_send_enable(fcport))
2582 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2583 		else
2584 			bfa_sm_set_state(fcport,
2585 					 bfa_fcport_sm_enabling_qwait);
2586 		break;
2587 
2588 	default:
2589 		/*
2590 		 * Ignore all other events.
2591 		 */
2592 		;
2593 	}
2594 }
2595 
2596 /*
2597  * Port is enabled. IOC is down/failed.
2598  */
2599 static void
2600 bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
2601 			 enum bfa_fcport_sm_event event)
2602 {
2603 	bfa_trc(fcport->bfa, event);
2604 
2605 	switch (event) {
2606 	case BFA_FCPORT_SM_START:
2607 		if (bfa_fcport_send_enable(fcport))
2608 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2609 		else
2610 			bfa_sm_set_state(fcport,
2611 					 bfa_fcport_sm_enabling_qwait);
2612 		break;
2613 
2614 	default:
2615 		/*
2616 		 * Ignore all events.
2617 		 */
2618 		;
2619 	}
2620 }
2621 
2622 /*
2623  * Port is disabled. IOC is down/failed.
2624  */
2625 static void
2626 bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
2627 			 enum bfa_fcport_sm_event event)
2628 {
2629 	bfa_trc(fcport->bfa, event);
2630 
2631 	switch (event) {
2632 	case BFA_FCPORT_SM_START:
2633 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2634 		break;
2635 
2636 	case BFA_FCPORT_SM_ENABLE:
2637 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2638 		break;
2639 
2640 	default:
2641 		/*
2642 		 * Ignore all events.
2643 		 */
2644 		;
2645 	}
2646 }
2647 
2648 /*
2649  * Link state is down
2650  */
2651 static void
2652 bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
2653 		enum bfa_fcport_ln_sm_event event)
2654 {
2655 	bfa_trc(ln->fcport->bfa, event);
2656 
2657 	switch (event) {
2658 	case BFA_FCPORT_LN_SM_LINKUP:
2659 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2660 		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2661 		break;
2662 
2663 	default:
2664 		bfa_sm_fault(ln->fcport->bfa, event);
2665 	}
2666 }
2667 
2668 /*
2669  * Link state is waiting for down notification
2670  */
2671 static void
2672 bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
2673 		enum bfa_fcport_ln_sm_event event)
2674 {
2675 	bfa_trc(ln->fcport->bfa, event);
2676 
2677 	switch (event) {
2678 	case BFA_FCPORT_LN_SM_LINKUP:
2679 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2680 		break;
2681 
2682 	case BFA_FCPORT_LN_SM_NOTIFICATION:
2683 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2684 		break;
2685 
2686 	default:
2687 		bfa_sm_fault(ln->fcport->bfa, event);
2688 	}
2689 }
2690 
2691 /*
2692  * Link state is waiting for down notification and there is a pending up
2693  */
2694 static void
2695 bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
2696 		enum bfa_fcport_ln_sm_event event)
2697 {
2698 	bfa_trc(ln->fcport->bfa, event);
2699 
2700 	switch (event) {
2701 	case BFA_FCPORT_LN_SM_LINKDOWN:
2702 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2703 		break;
2704 
2705 	case BFA_FCPORT_LN_SM_NOTIFICATION:
2706 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2707 		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2708 		break;
2709 
2710 	default:
2711 		bfa_sm_fault(ln->fcport->bfa, event);
2712 	}
2713 }
2714 
2715 /*
2716  * Link state is up
2717  */
2718 static void
2719 bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
2720 		enum bfa_fcport_ln_sm_event event)
2721 {
2722 	bfa_trc(ln->fcport->bfa, event);
2723 
2724 	switch (event) {
2725 	case BFA_FCPORT_LN_SM_LINKDOWN:
2726 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2727 		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2728 		break;
2729 
2730 	default:
2731 		bfa_sm_fault(ln->fcport->bfa, event);
2732 	}
2733 }
2734 
2735 /*
2736  * Link state is waiting for up notification
2737  */
2738 static void
2739 bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
2740 		enum bfa_fcport_ln_sm_event event)
2741 {
2742 	bfa_trc(ln->fcport->bfa, event);
2743 
2744 	switch (event) {
2745 	case BFA_FCPORT_LN_SM_LINKDOWN:
2746 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2747 		break;
2748 
2749 	case BFA_FCPORT_LN_SM_NOTIFICATION:
2750 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
2751 		break;
2752 
2753 	default:
2754 		bfa_sm_fault(ln->fcport->bfa, event);
2755 	}
2756 }
2757 
2758 /*
2759  * Link state is waiting for up notification and there is a pending down
2760  */
2761 static void
2762 bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
2763 		enum bfa_fcport_ln_sm_event event)
2764 {
2765 	bfa_trc(ln->fcport->bfa, event);
2766 
2767 	switch (event) {
2768 	case BFA_FCPORT_LN_SM_LINKUP:
2769 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
2770 		break;
2771 
2772 	case BFA_FCPORT_LN_SM_NOTIFICATION:
2773 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2774 		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2775 		break;
2776 
2777 	default:
2778 		bfa_sm_fault(ln->fcport->bfa, event);
2779 	}
2780 }
2781 
2782 /*
2783  * Link state is waiting for up notification and there are pending down and up
2784  */
2785 static void
2786 bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
2787 			enum bfa_fcport_ln_sm_event event)
2788 {
2789 	bfa_trc(ln->fcport->bfa, event);
2790 
2791 	switch (event) {
2792 	case BFA_FCPORT_LN_SM_LINKDOWN:
2793 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2794 		break;
2795 
2796 	case BFA_FCPORT_LN_SM_NOTIFICATION:
2797 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2798 		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2799 		break;
2800 
2801 	default:
2802 		bfa_sm_fault(ln->fcport->bfa, event);
2803 	}
2804 }
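
/*
 * The six ln states above serialize link notifications so that at most
 * one callback to the upper layer is outstanding. A sample trace,
 * assuming the link flaps while the first linkup callback is pending:
 *
 *	LINKUP        : dn          -> up_nf        (LINKUP queued)
 *	LINKDOWN      : up_nf       -> up_dn_nf     (down remembered)
 *	LINKUP        : up_dn_nf    -> up_dn_up_nf  (up remembered too)
 *	NOTIFICATION  : up_dn_up_nf -> dn_up_nf     (LINKDOWN queued)
 *	NOTIFICATION  : dn_up_nf    -> up_nf        (LINKUP queued)
 *
 * No intermediate event is lost, and the upper layer always sees an
 * alternating up/down sequence.
 */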
2805 
2806 
2807 
2808 /*
2809  *  hal_port_private
2810  */
2811 
2812 static void
2813 __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2814 {
2815 	struct bfa_fcport_ln_s *ln = cbarg;
2816 
2817 	if (complete)
2818 		ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
2819 	else
2820 		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2821 }
2822 
2823 /*
2824  * Send SCN notification to upper layers.
2825  * trunk - BFA_FALSE when the caller is fcport itself, so that individual
2825  *	   port events are suppressed while in trunked mode
2826  */
2827 static void
2828 bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
2829 	bfa_boolean_t trunk)
2830 {
2831 	if (fcport->cfg.trunked && !trunk)
2832 		return;
2833 
2834 	switch (event) {
2835 	case BFA_PORT_LINKUP:
2836 		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
2837 		break;
2838 	case BFA_PORT_LINKDOWN:
2839 		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
2840 		break;
2841 	default:
2842 		bfa_assert(0);
2843 	}
2844 }
2845 
2846 static void
2847 bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
2848 {
2849 	struct bfa_fcport_s *fcport = ln->fcport;
2850 
2851 	if (fcport->bfa->fcs) {
2852 		fcport->event_cbfn(fcport->event_cbarg, event);
2853 		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2854 	} else {
2855 		ln->ln_event = event;
2856 		bfa_cb_queue(fcport->bfa, &ln->ln_qe,
2857 			__bfa_cb_fcport_event, ln);
2858 	}
2859 }
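
/*
 * A minimal sketch of the deferred path above, assuming bfa_cb_queue()
 * simply links the hcb element onto a completion queue that is drained
 * later during IOC completion processing:
 *
 *	ln->ln_event = event;				// park the payload
 *	bfa_cb_queue(bfa, &ln->ln_qe,
 *		     __bfa_cb_fcport_event, ln);	// runs later
 *
 * The synchronous branch is taken only when the FCS runs in the same
 * context and can absorb the callback immediately.
 */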
2860 
2861 #define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
2862 							BFA_CACHELINE_SZ))
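
/*
 * A sketch of the rounding hidden in FCPORT_STATS_DMA_SZ, assuming
 * BFA_ROUNDUP() is the usual round-up-to-multiple macro and
 * BFA_CACHELINE_SZ is a power of two (64 used here for illustration):
 *
 *	#define ROUNDUP(x, sz)	(((x) + (sz) - 1) & ~((sz) - 1))
 *
 *	ROUNDUP(100, 64);	// 128
 *	ROUNDUP(128, 64);	// 128, already aligned, unchanged
 *
 * Cache-line rounding keeps the stats DMA block from sharing a line with
 * whatever is claimed from the meminfo pool after it.
 */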
2863 
2864 static void
2865 bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
2866 		u32 *dm_len)
2867 {
2868 	*dm_len += FCPORT_STATS_DMA_SZ;
2869 }
2870 
2871 static void
2872 bfa_fcport_qresume(void *cbarg)
2873 {
2874 	struct bfa_fcport_s *fcport = cbarg;
2875 
2876 	bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
2877 }
2878 
2879 static void
2880 bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
2881 {
2882 	u8		*dm_kva;
2883 	u64	dm_pa;
2884 
2885 	dm_kva = bfa_meminfo_dma_virt(meminfo);
2886 	dm_pa  = bfa_meminfo_dma_phys(meminfo);
2887 
2888 	fcport->stats_kva = dm_kva;
2889 	fcport->stats_pa  = dm_pa;
2890 	fcport->stats	  = (union bfa_fcport_stats_u *) dm_kva;
2891 
2892 	dm_kva += FCPORT_STATS_DMA_SZ;
2893 	dm_pa  += FCPORT_STATS_DMA_SZ;
2894 
2895 	bfa_meminfo_dma_virt(meminfo) = dm_kva;
2896 	bfa_meminfo_dma_phys(meminfo) = dm_pa;
2897 }
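
/*
 * The claim pattern above is the same in every BFA module, sketched here
 * with illustrative cursor names: copy the current DMA cursors, then
 * advance them by exactly the size the module's meminfo handler
 * (bfa_fcport_meminfo() here) reserved:
 *
 *	block_kva = cursor_kva;   block_pa = cursor_pa;
 *	cursor_kva += SIZE;       cursor_pa += SIZE;
 *
 * Claiming more than was reported in meminfo would silently overlap the
 * next module's DMA area.
 */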
2898 
2899 /*
2900  * Module attach: claim DMA memory and set up the default port configuration.
2901  */
2902 static void
2903 bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
2904 		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
2905 {
2906 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
2907 	struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
2908 	struct bfa_fcport_ln_s *ln = &fcport->ln;
2909 	struct bfa_timeval_s tv;
2910 
2911 	memset(fcport, 0, sizeof(struct bfa_fcport_s));
2912 	fcport->bfa = bfa;
2913 	ln->fcport = fcport;
2914 
2915 	bfa_fcport_mem_claim(fcport, meminfo);
2916 
2917 	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
2918 	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2919 
2920 	/*
2921 	 * initialize time stamp for stats reset
2922 	 */
2923 	bfa_os_gettimeofday(&tv);
2924 	fcport->stats_reset_time = tv.tv_sec;
2925 
2926 	/*
2927 	 * initialize and set default configuration
2928 	 */
2929 	port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
2930 	port_cfg->speed = BFA_PORT_SPEED_AUTO;
2931 	port_cfg->trunked = BFA_FALSE;
2932 	port_cfg->maxfrsize = 0;
2933 
2934 	port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
2935 
2936 	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
2937 }
2938 
2939 static void
2940 bfa_fcport_detach(struct bfa_s *bfa)
2941 {
2942 }
2943 
2944 /*
2945  * Called when IOC is ready.
2946  */
2947 static void
2948 bfa_fcport_start(struct bfa_s *bfa)
2949 {
2950 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
2951 }
2952 
2953 /*
2954  * Called before IOC is stopped.
2955  */
2956 static void
2957 bfa_fcport_stop(struct bfa_s *bfa)
2958 {
2959 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
2960 	bfa_trunk_iocdisable(bfa);
2961 }
2962 
2963 /*
2964  * Called when IOC failure is detected.
2965  */
2966 static void
2967 bfa_fcport_iocdisable(struct bfa_s *bfa)
2968 {
2969 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
2970 
2971 	bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
2972 	bfa_trunk_iocdisable(bfa);
2973 }
2974 
2975 static void
2976 bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
2977 {
2978 	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
2979 	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
2980 
2981 	fcport->speed = pevent->link_state.speed;
2982 	fcport->topology = pevent->link_state.topology;
2983 
2984 	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)
2985 		fcport->myalpa = 0;
2986 
2987 	/* QoS Details */
2988 	fcport->qos_attr = pevent->link_state.qos_attr;
2989 	fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr;
2990 
2991 	/*
2992 	 * update trunk state if applicable
2993 	 */
2994 	if (!fcport->cfg.trunked)
2995 		trunk->attr.state = BFA_TRUNK_DISABLED;
2996 
2997 	/* update FCoE specific */
2998 	fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan);
2999 
3000 	bfa_trc(fcport->bfa, fcport->speed);
3001 	bfa_trc(fcport->bfa, fcport->topology);
3002 }
3003 
3004 static void
3005 bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
3006 {
3007 	fcport->speed = BFA_PORT_SPEED_UNKNOWN;
3008 	fcport->topology = BFA_PORT_TOPOLOGY_NONE;
3009 }
3010 
3011 /*
3012  * Send port enable message to firmware.
3013  */
3014 static bfa_boolean_t
3015 bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
3016 {
3017 	struct bfi_fcport_enable_req_s *m;
3018 
3019 	/*
3020 	 * Increment message tag before queue check, so that responses to old
3021 	 * requests are discarded.
3022 	 */
3023 	fcport->msgtag++;
3024 
3025 	/*
3026 	 * check for room in queue to send request now
3027 	 */
3028 	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3029 	if (!m) {
3030 		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3031 							&fcport->reqq_wait);
3032 		return BFA_FALSE;
3033 	}
3034 
3035 	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
3036 			bfa_lpuid(fcport->bfa));
3037 	m->nwwn = fcport->nwwn;
3038 	m->pwwn = fcport->pwwn;
3039 	m->port_cfg = fcport->cfg;
3040 	m->msgtag = fcport->msgtag;
3041 	m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
3042 	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
3043 	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
3044 	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
3045 
3046 	/*
3047 	 * queue I/O message to firmware
3048 	 */
3049 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
3050 	return BFA_TRUE;
3051 }
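
/*
 * The msgtag bump above pairs with the compare in bfa_fcport_isr() to
 * filter stale firmware responses. The sequencing, made explicit:
 *
 *	fcport->msgtag++;			// tag N+1 is now current
 *	// ... ENABLE_REQ carrying N+1 is queued ...
 *	// a late response still carrying tag N is ignored on arrival:
 *	if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
 *		bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
 *
 * Incrementing before the queue-full check matters: even when the
 * request is deferred for queue space, any response to an older request
 * is already invalidated.
 */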
3052 
3053 /*
3054  * Send port disable message to firmware.
3055  */
3056 static	bfa_boolean_t
3057 bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
3058 {
3059 	struct bfi_fcport_req_s *m;
3060 
3061 	/*
3062 	 * Increment message tag before queue check, so that responses to old
3063 	 * requests are discarded.
3064 	 */
3065 	fcport->msgtag++;
3066 
3067 	/*
3068 	 * check for room in queue to send request now
3069 	 */
3070 	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3071 	if (!m) {
3072 		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3073 							&fcport->reqq_wait);
3074 		return BFA_FALSE;
3075 	}
3076 
3077 	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
3078 			bfa_lpuid(fcport->bfa));
3079 	m->msgtag = fcport->msgtag;
3080 
3081 	/*
3082 	 * queue I/O message to firmware
3083 	 */
3084 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
3085 
3086 	return BFA_TRUE;
3087 }
3088 
3089 static void
3090 bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
3091 {
3092 	fcport->pwwn = bfa_ioc_get_pwwn(&fcport->bfa->ioc);
3093 	fcport->nwwn = bfa_ioc_get_nwwn(&fcport->bfa->ioc);
3094 
3095 	bfa_trc(fcport->bfa, fcport->pwwn);
3096 	bfa_trc(fcport->bfa, fcport->nwwn);
3097 }
3098 
3099 static void
3100 bfa_fcport_send_txcredit(void *port_cbarg)
3101 {
3103 	struct bfa_fcport_s *fcport = port_cbarg;
3104 	struct bfi_fcport_set_svc_params_req_s *m;
3105 
3106 	/*
3107 	 * check for room in queue to send request now
3108 	 */
3109 	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3110 	if (!m) {
3111 		bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
3112 		return;
3113 	}
3114 
3115 	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
3116 			bfa_lpuid(fcport->bfa));
3117 	m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);
3118 
3119 	/*
3120 	 * queue I/O message to firmware
3121 	 */
3122 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
3123 }
3124 
3125 static void
3126 bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3127 	struct bfa_qos_stats_s *s)
3128 {
3129 	u32	*dip = (u32 *) d;
3130 	u32	*sip = (u32 *) s;
3131 	int		i;
3132 
3133 	/* Now swap the 32 bit fields */
3134 	for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
3135 		dip[i] = be32_to_cpu(sip[i]);
3136 }
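
/*
 * A self-contained equivalent of the loop above: a structure made up
 * entirely of 32-bit big-endian counters can be converted in place by
 * treating it as a u32 array. This is only valid while every field in
 * bfa_qos_stats_s really is 32 bits wide:
 *
 *	static void swap_be32_words(u32 *dst, const u32 *src, size_t nwords)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i < nwords; i++)
 *			dst[i] = be32_to_cpu(src[i]);
 *	}
 */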
3137 
3138 static void
3139 bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
3140 	struct bfa_fcoe_stats_s *s)
3141 {
3142 	u32	*dip = (u32 *) d;
3143 	u32	*sip = (u32 *) s;
3144 	int		i;
3145 
3146 	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
3147 	     i = i + 2) {
3148 #ifdef __BIGENDIAN
3149 		dip[i] = be32_to_cpu(sip[i]);
3150 		dip[i + 1] = be32_to_cpu(sip[i + 1]);
3151 #else
3152 		dip[i] = be32_to_cpu(sip[i + 1]);
3153 		dip[i + 1] = be32_to_cpu(sip[i]);
3154 #endif
3155 	}
3156 }
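
/*
 * Why the loop above walks in pairs: the FCoE counters are 64-bit values
 * stored big-endian by firmware. On a little-endian host both the bytes
 * inside each 32-bit half and the order of the two halves must flip; on
 * a big-endian host the be32_to_cpu() calls are no-ops and the halves
 * stay in place. Per counter, the loop is therefore equivalent to:
 *
 *	u64 host_val = be64_to_cpu(fw_val);
 *
 * which likewise only holds while the structure contains nothing but
 * 64-bit counters.
 */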
3157 
3158 static void
3159 __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
3160 {
3161 	struct bfa_fcport_s *fcport = cbarg;
3162 
3163 	if (complete) {
3164 		if (fcport->stats_status == BFA_STATUS_OK) {
3165 			struct bfa_timeval_s tv;
3166 
3167 			/* Swap FC QoS or FCoE stats */
3168 			if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
3169 				bfa_fcport_qos_stats_swap(
3170 					&fcport->stats_ret->fcqos,
3171 					&fcport->stats->fcqos);
3172 			} else {
3173 				bfa_fcport_fcoe_stats_swap(
3174 					&fcport->stats_ret->fcoe,
3175 					&fcport->stats->fcoe);
3176 
3177 				bfa_os_gettimeofday(&tv);
3178 				fcport->stats_ret->fcoe.secs_reset =
3179 					tv.tv_sec - fcport->stats_reset_time;
3180 			}
3181 		}
3182 		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
3183 	} else {
3184 		fcport->stats_busy = BFA_FALSE;
3185 		fcport->stats_status = BFA_STATUS_OK;
3186 	}
3187 }
3188 
3189 static void
3190 bfa_fcport_stats_get_timeout(void *cbarg)
3191 {
3192 	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3193 
3194 	bfa_trc(fcport->bfa, fcport->stats_qfull);
3195 
3196 	if (fcport->stats_qfull) {
3197 		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3198 		fcport->stats_qfull = BFA_FALSE;
3199 	}
3200 
3201 	fcport->stats_status = BFA_STATUS_ETIMER;
3202 	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
3203 		fcport);
3204 }
3205 
3206 static void
3207 bfa_fcport_send_stats_get(void *cbarg)
3208 {
3209 	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3210 	struct bfi_fcport_req_s *msg;
3211 
3212 	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3213 
3214 	if (!msg) {
3215 		fcport->stats_qfull = BFA_TRUE;
3216 		bfa_reqq_winit(&fcport->stats_reqq_wait,
3217 				bfa_fcport_send_stats_get, fcport);
3218 		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3219 				&fcport->stats_reqq_wait);
3220 		return;
3221 	}
3222 	fcport->stats_qfull = BFA_FALSE;
3223 
3224 	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3225 	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
3226 			bfa_lpuid(fcport->bfa));
3227 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
3228 }
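
/*
 * Note the retry shape above: when the request queue is full, the
 * function installs itself as the queue-space callback, so the same code
 * path simply re-runs once room opens up (wqe here stands for
 * fcport->stats_reqq_wait):
 *
 *	bfa_reqq_winit(&wqe, bfa_fcport_send_stats_get, fcport);
 *	bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, &wqe);
 *
 * bfa_fcport_send_stats_clear() below uses the identical idiom.
 */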
3229 
3230 static void
3231 __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
3232 {
3233 	struct bfa_fcport_s *fcport = cbarg;
3234 
3235 	if (complete) {
3236 		struct bfa_timeval_s tv;
3237 
3238 		/*
3239 		 * re-initialize time stamp for stats reset
3240 		 */
3241 		bfa_os_gettimeofday(&tv);
3242 		fcport->stats_reset_time = tv.tv_sec;
3243 
3244 		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
3245 	} else {
3246 		fcport->stats_busy = BFA_FALSE;
3247 		fcport->stats_status = BFA_STATUS_OK;
3248 	}
3249 }
3250 
3251 static void
3252 bfa_fcport_stats_clr_timeout(void *cbarg)
3253 {
3254 	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3255 
3256 	bfa_trc(fcport->bfa, fcport->stats_qfull);
3257 
3258 	if (fcport->stats_qfull) {
3259 		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3260 		fcport->stats_qfull = BFA_FALSE;
3261 	}
3262 
3263 	fcport->stats_status = BFA_STATUS_ETIMER;
3264 	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
3265 			__bfa_cb_fcport_stats_clr, fcport);
3266 }
3267 
3268 static void
3269 bfa_fcport_send_stats_clear(void *cbarg)
3270 {
3271 	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3272 	struct bfi_fcport_req_s *msg;
3273 
3274 	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3275 
3276 	if (!msg) {
3277 		fcport->stats_qfull = BFA_TRUE;
3278 		bfa_reqq_winit(&fcport->stats_reqq_wait,
3279 				bfa_fcport_send_stats_clear, fcport);
3280 		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3281 						&fcport->stats_reqq_wait);
3282 		return;
3283 	}
3284 	fcport->stats_qfull = BFA_FALSE;
3285 
3286 	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3287 	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
3288 			bfa_lpuid(fcport->bfa));
3289 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
3290 }
3291 
3292 /*
3293  * Handle trunk SCN event from firmware.
3294  */
3295 static void
3296 bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
3297 {
3298 	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
3299 	struct bfi_fcport_trunk_link_s *tlink;
3300 	struct bfa_trunk_link_attr_s *lattr;
3301 	enum bfa_trunk_state state_prev;
3302 	int i;
3303 	int link_bm = 0;
3304 
3305 	bfa_trc(fcport->bfa, fcport->cfg.trunked);
3306 	bfa_assert(scn->trunk_state == BFA_TRUNK_ONLINE ||
3307 		   scn->trunk_state == BFA_TRUNK_OFFLINE);
3308 
3309 	bfa_trc(fcport->bfa, trunk->attr.state);
3310 	bfa_trc(fcport->bfa, scn->trunk_state);
3311 	bfa_trc(fcport->bfa, scn->trunk_speed);
3312 
3313 	/*
3314 	 * Save off new state for trunk attribute query
3315 	 */
3316 	state_prev = trunk->attr.state;
3317 	if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
3318 		trunk->attr.state = scn->trunk_state;
3319 	trunk->attr.speed = scn->trunk_speed;
3320 	for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3321 		lattr = &trunk->attr.link_attr[i];
3322 		tlink = &scn->tlink[i];
3323 
3324 		lattr->link_state = tlink->state;
3325 		lattr->trunk_wwn  = tlink->trunk_wwn;
3326 		lattr->fctl	  = tlink->fctl;
3327 		lattr->speed	  = tlink->speed;
3328 		lattr->deskew	  = be32_to_cpu(tlink->deskew);
3329 
3330 		if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
3331 			fcport->speed	 = tlink->speed;
3332 			fcport->topology = BFA_PORT_TOPOLOGY_P2P;
3333 			link_bm |= 1 << i;
3334 		}
3335 
3336 		bfa_trc(fcport->bfa, lattr->link_state);
3337 		bfa_trc(fcport->bfa, lattr->trunk_wwn);
3338 		bfa_trc(fcport->bfa, lattr->fctl);
3339 		bfa_trc(fcport->bfa, lattr->speed);
3340 		bfa_trc(fcport->bfa, lattr->deskew);
3341 	}
3342 
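	/*
	 * link_bm now has one bit per trunked link that reported
	 * BFA_TRUNK_LINK_STATE_UP: bit 0 for link 0, bit 1 for link 1.
	 * The switch below therefore decodes 3 as both links up, 2 as
	 * only link 1 up, 1 as only link 0 up, and anything else as
	 * trunk down.
	 */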
3343 	switch (link_bm) {
3344 	case 3:
3345 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3346 			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
3347 		break;
3348 	case 2:
3349 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3350 			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
3351 		break;
3352 	case 1:
3353 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3354 			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
3355 		break;
3356 	default:
3357 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3358 			BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
3359 	}
3360 
3361 	/*
3362 	 * Notify upper layers if trunk state changed.
3363 	 */
3364 	if ((state_prev != trunk->attr.state) ||
3365 		(scn->trunk_state == BFA_TRUNK_OFFLINE)) {
3366 		bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
3367 			BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
3368 	}
3369 }
3370 
3371 static void
3372 bfa_trunk_iocdisable(struct bfa_s *bfa)
3373 {
3374 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3375 	int i = 0;
3376 
3377 	/*
3378 	 * In trunked mode, notify upper layers that link is down
3379 	 */
3380 	if (fcport->cfg.trunked) {
3381 		if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
3382 			bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);
3383 
3384 		fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
3385 		fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
3386 		for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3387 			fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
3388 			fcport->trunk.attr.link_attr[i].fctl =
3389 						BFA_TRUNK_LINK_FCTL_NORMAL;
3390 			fcport->trunk.attr.link_attr[i].link_state =
3391 						BFA_TRUNK_LINK_STATE_DN_LINKDN;
3392 			fcport->trunk.attr.link_attr[i].speed =
3393 						BFA_PORT_SPEED_UNKNOWN;
3394 			fcport->trunk.attr.link_attr[i].deskew = 0;
3395 		}
3396 	}
3397 }
3398 
3399 
3400 
3401 /*
3402  *  hal_port_public
3403  */
3404 
3405 /*
3406  * Called to initialize port attributes
3407  */
3408 void
3409 bfa_fcport_init(struct bfa_s *bfa)
3410 {
3411 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3412 
3413 	/*
3414 	 * Initialize port attributes from IOC hardware data.
3415 	 */
3416 	bfa_fcport_set_wwns(fcport);
3417 	if (fcport->cfg.maxfrsize == 0)
3418 		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
3419 	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
3420 	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
3421 
3422 	bfa_assert(fcport->cfg.maxfrsize);
3423 	bfa_assert(fcport->cfg.rx_bbcredit);
3424 	bfa_assert(fcport->speed_sup);
3425 }
3426 
3427 /*
3428  * Firmware message handler.
3429  */
3430 void
3431 bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3432 {
3433 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3434 	union bfi_fcport_i2h_msg_u i2hmsg;
3435 
3436 	i2hmsg.msg = msg;
3437 	fcport->event_arg.i2hmsg = i2hmsg;
3438 
3439 	bfa_trc(bfa, msg->mhdr.msg_id);
3440 	bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));
3441 
3442 	switch (msg->mhdr.msg_id) {
3443 	case BFI_FCPORT_I2H_ENABLE_RSP:
3444 		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
3445 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3446 		break;
3447 
3448 	case BFI_FCPORT_I2H_DISABLE_RSP:
3449 		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
3450 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3451 		break;
3452 
3453 	case BFI_FCPORT_I2H_EVENT:
3454 		if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
3455 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
3456 		else
3457 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
3458 		break;
3459 
3460 	case BFI_FCPORT_I2H_TRUNK_SCN:
3461 		bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
3462 		break;
3463 
3464 	case BFI_FCPORT_I2H_STATS_GET_RSP:
3465 		/*
3466 		 * check for timer pop before processing the rsp
3467 		 */
3468 		if (fcport->stats_busy == BFA_FALSE ||
3469 		    fcport->stats_status == BFA_STATUS_ETIMER)
3470 			break;
3471 
3472 		bfa_timer_stop(&fcport->timer);
3473 		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
3474 		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
3475 				__bfa_cb_fcport_stats_get, fcport);
3476 		break;
3477 
3478 	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
3479 		/*
3480 		 * check for timer pop before processing the rsp
3481 		 */
3482 		if (fcport->stats_busy == BFA_FALSE ||
3483 		    fcport->stats_status == BFA_STATUS_ETIMER)
3484 			break;
3485 
3486 		bfa_timer_stop(&fcport->timer);
3487 		fcport->stats_status = BFA_STATUS_OK;
3488 		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
3489 				__bfa_cb_fcport_stats_clr, fcport);
3490 		break;
3491 
3492 	case BFI_FCPORT_I2H_ENABLE_AEN:
3493 		bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
3494 		break;
3495 
3496 	case BFI_FCPORT_I2H_DISABLE_AEN:
3497 		bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
3498 		break;
3499 
3500 	default:
3501 		bfa_assert(0);
3502 		break;
3503 	}
3504 }
3505 
3506 
3507 
3508 /*
3509  *  hal_port_api
3510  */
3511 
3512 /*
3513  * Registered callback for port events.
3514  */
3515 void
3516 bfa_fcport_event_register(struct bfa_s *bfa,
3517 				void (*cbfn) (void *cbarg,
3518 				enum bfa_port_linkstate event),
3519 				void *cbarg)
3520 {
3521 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3522 
3523 	fcport->event_cbfn = cbfn;
3524 	fcport->event_cbarg = cbarg;
3525 }
3526 
3527 bfa_status_t
3528 bfa_fcport_enable(struct bfa_s *bfa)
3529 {
3530 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3531 
3532 	if (bfa_ioc_is_disabled(&bfa->ioc))
3533 		return BFA_STATUS_IOC_DISABLED;
3534 
3535 	if (fcport->diag_busy)
3536 		return BFA_STATUS_DIAG_BUSY;
3537 
3538 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
3539 	return BFA_STATUS_OK;
3540 }
3541 
3542 bfa_status_t
3543 bfa_fcport_disable(struct bfa_s *bfa)
3544 {
3546 	if (bfa_ioc_is_disabled(&bfa->ioc))
3547 		return BFA_STATUS_IOC_DISABLED;
3548 
3549 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
3550 	return BFA_STATUS_OK;
3551 }
3552 
3553 /*
3554  * Configure port speed.
3555  */
3556 bfa_status_t
3557 bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3558 {
3559 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3560 
3561 	bfa_trc(bfa, speed);
3562 
3563 	if (fcport->cfg.trunked == BFA_TRUE)
3564 		return BFA_STATUS_TRUNK_ENABLED;
3565 	if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3566 		bfa_trc(bfa, fcport->speed_sup);
3567 		return BFA_STATUS_UNSUPP_SPEED;
3568 	}
3569 
3570 	fcport->cfg.speed = speed;
3571 
3572 	return BFA_STATUS_OK;
3573 }
3574 
3575 /*
3576  * Get current speed.
3577  */
3578 enum bfa_port_speed
3579 bfa_fcport_get_speed(struct bfa_s *bfa)
3580 {
3581 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3582 
3583 	return fcport->speed;
3584 }
3585 
3586 /*
3587  * Configure port topology.
3588  */
3589 bfa_status_t
3590 bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3591 {
3592 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3593 
3594 	bfa_trc(bfa, topology);
3595 	bfa_trc(bfa, fcport->cfg.topology);
3596 
3597 	switch (topology) {
3598 	case BFA_PORT_TOPOLOGY_P2P:
3599 	case BFA_PORT_TOPOLOGY_LOOP:
3600 	case BFA_PORT_TOPOLOGY_AUTO:
3601 		break;
3602 
3603 	default:
3604 		return BFA_STATUS_EINVAL;
3605 	}
3606 
3607 	fcport->cfg.topology = topology;
3608 	return BFA_STATUS_OK;
3609 }
3610 
3611 /*
3612  * Get current topology.
3613  */
3614 enum bfa_port_topology
3615 bfa_fcport_get_topology(struct bfa_s *bfa)
3616 {
3617 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3618 
3619 	return fcport->topology;
3620 }
3621 
3622 bfa_status_t
3623 bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3624 {
3625 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3626 
3627 	bfa_trc(bfa, alpa);
3628 	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3629 	bfa_trc(bfa, fcport->cfg.hardalpa);
3630 
3631 	fcport->cfg.cfg_hardalpa = BFA_TRUE;
3632 	fcport->cfg.hardalpa = alpa;
3633 
3634 	return BFA_STATUS_OK;
3635 }
3636 
3637 bfa_status_t
3638 bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3639 {
3640 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3641 
3642 	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3643 	bfa_trc(bfa, fcport->cfg.hardalpa);
3644 
3645 	fcport->cfg.cfg_hardalpa = BFA_FALSE;
3646 	return BFA_STATUS_OK;
3647 }
3648 
3649 bfa_boolean_t
3650 bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3651 {
3652 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3653 
3654 	*alpa = fcport->cfg.hardalpa;
3655 	return fcport->cfg.cfg_hardalpa;
3656 }
3657 
3658 u8
3659 bfa_fcport_get_myalpa(struct bfa_s *bfa)
3660 {
3661 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3662 
3663 	return fcport->myalpa;
3664 }
3665 
3666 bfa_status_t
3667 bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3668 {
3669 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3670 
3671 	bfa_trc(bfa, maxfrsize);
3672 	bfa_trc(bfa, fcport->cfg.maxfrsize);
3673 
3674 	/* must be within the valid range */
3675 	if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
3676 		return BFA_STATUS_INVLD_DFSZ;
3677 
3678 	/* must be a power of 2, unless it is the max frame size of 2112 */
3679 	if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
3680 		return BFA_STATUS_INVLD_DFSZ;
3681 
3682 	fcport->cfg.maxfrsize = maxfrsize;
3683 	return BFA_STATUS_OK;
3684 }
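
/*
 * The (x & (x - 1)) test above clears the lowest set bit, so it yields
 * zero exactly when x is a power of two:
 *
 *	2048 & 2047	// 0x800 & 0x7ff == 0: power of two, accepted
 *	2000 & 1999	// non-zero: rejected with BFA_STATUS_INVLD_DFSZ
 *
 * FC_MAX_PDUSZ (2112) is the one non-power-of-two size the first half of
 * the condition explicitly lets through.
 */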
3685 
3686 u16
3687 bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3688 {
3689 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3690 
3691 	return fcport->cfg.maxfrsize;
3692 }
3693 
3694 u8
3695 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3696 {
3697 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3698 
3699 	return fcport->cfg.rx_bbcredit;
3700 }
3701 
3702 void
3703 bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
3704 {
3705 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3706 
3707 	fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
3708 	bfa_fcport_send_txcredit(fcport);
3709 }
3710 
3711 /*
3712  * Get port attributes.
3713  */
3714 
3715 wwn_t
3716 bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3717 {
3718 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3719 	if (node)
3720 		return fcport->nwwn;
3721 	else
3722 		return fcport->pwwn;
3723 }
3724 
3725 void
3726 bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
3727 {
3728 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3729 
3730 	memset(attr, 0, sizeof(struct bfa_port_attr_s));
3731 
3732 	attr->nwwn = fcport->nwwn;
3733 	attr->pwwn = fcport->pwwn;
3734 
3735 	attr->factorypwwn =  bfa_ioc_get_mfg_pwwn(&bfa->ioc);
3736 	attr->factorynwwn =  bfa_ioc_get_mfg_nwwn(&bfa->ioc);
3737 
3738 	memcpy(&attr->pport_cfg, &fcport->cfg,
3739 		sizeof(struct bfa_port_cfg_s));
3740 	/* speed attributes */
3741 	attr->pport_cfg.speed = fcport->cfg.speed;
3742 	attr->speed_supported = fcport->speed_sup;
3743 	attr->speed = fcport->speed;
3744 	attr->cos_supported = FC_CLASS_3;
3745 
3746 	/* topology attributes */
3747 	attr->pport_cfg.topology = fcport->cfg.topology;
3748 	attr->topology = fcport->topology;
3749 	attr->pport_cfg.trunked = fcport->cfg.trunked;
3750 
3751 	/* beacon attributes */
3752 	attr->beacon = fcport->beacon;
3753 	attr->link_e2e_beacon = fcport->link_e2e_beacon;
3754 	attr->plog_enabled = bfa_plog_get_setting(fcport->bfa->plog);
3755 	attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa);
3756 
3757 	attr->pport_cfg.path_tov  = bfa_fcpim_path_tov_get(bfa);
3758 	attr->pport_cfg.q_depth  = bfa_fcpim_qdepth_get(bfa);
3759 	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
3760 	if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
3761 		attr->port_state = BFA_PORT_ST_IOCDIS;
3762 	else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
3763 		attr->port_state = BFA_PORT_ST_FWMISMATCH;
3764 
3765 	/* FCoE vlan */
3766 	attr->fcoe_vlan = fcport->fcoe_vlan;
3767 }
3768 
3769 #define BFA_FCPORT_STATS_TOV	1000
3770 
3771 /*
3772  * Fetch port statistics (FCQoS or FCoE).
3773  */
3774 bfa_status_t
3775 bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3776 	bfa_cb_port_t cbfn, void *cbarg)
3777 {
3778 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3779 
3780 	if (fcport->stats_busy) {
3781 		bfa_trc(bfa, fcport->stats_busy);
3782 		return BFA_STATUS_DEVBUSY;
3783 	}
3784 
3785 	fcport->stats_busy  = BFA_TRUE;
3786 	fcport->stats_ret   = stats;
3787 	fcport->stats_cbfn  = cbfn;
3788 	fcport->stats_cbarg = cbarg;
3789 
3790 	bfa_fcport_send_stats_get(fcport);
3791 
3792 	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
3793 			fcport, BFA_FCPORT_STATS_TOV);
3794 	return BFA_STATUS_OK;
3795 }
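
/*
 * A hypothetical caller, to show the asynchronous contract: the stats
 * buffer must stay valid until cbfn fires, either with BFA_STATUS_OK
 * (counters swapped into place) or BFA_STATUS_ETIMER (the
 * BFA_FCPORT_STATS_TOV timer popped first). The caller-side names are
 * illustrative; the (cbarg, status) callback shape follows the
 * stats_cbfn invocation in __bfa_cb_fcport_stats_get():
 *
 *	static union bfa_fcport_stats_u drv_stats;
 *
 *	static void drv_stats_done(void *cbarg, bfa_status_t status)
 *	{
 *		if (status == BFA_STATUS_OK)
 *			consume_stats(&drv_stats);	// hypothetical consumer
 *	}
 *
 *	if (bfa_fcport_get_stats(bfa, &drv_stats, drv_stats_done, NULL)
 *	    != BFA_STATUS_OK)
 *		;	// DEVBUSY: another stats request is in flight
 */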
3796 
3797 /*
3798  * Reset port statistics (FCQoS or FCoE).
3799  */
3800 bfa_status_t
3801 bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
3802 {
3803 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3804 
3805 	if (fcport->stats_busy) {
3806 		bfa_trc(bfa, fcport->stats_busy);
3807 		return BFA_STATUS_DEVBUSY;
3808 	}
3809 
3810 	fcport->stats_busy  = BFA_TRUE;
3811 	fcport->stats_cbfn  = cbfn;
3812 	fcport->stats_cbarg = cbarg;
3813 
3814 	bfa_fcport_send_stats_clear(fcport);
3815 
3816 	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
3817 			fcport, BFA_FCPORT_STATS_TOV);
3818 	return BFA_STATUS_OK;
3819 }
3820 
3821 /*
3822  * Fetch FCQoS port statistics
3823  */
3824 bfa_status_t
3825 bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3826 	bfa_cb_port_t cbfn, void *cbarg)
3827 {
3828 	/* Meaningful only for FC mode */
3829 	bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
3830 
3831 	return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
3832 }
3833 
3834 /*
3835  * Reset FCQoS port statistics
3836  */
3837 bfa_status_t
3838 bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
3839 {
3840 	/* Meaningful only for FC mode */
3841 	bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
3842 
3843 	return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
3844 }
3845 
3846 /*
3847  * Fetch FCoE port statistics
3848  */
3849 bfa_status_t
3850 bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3851 	bfa_cb_port_t cbfn, void *cbarg)
3852 {
3853 	/* Meaningful only for FCoE mode */
3854 	bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
3855 
3856 	return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
3857 }
3858 
3859 /*
3860  * Reset FCoE port statistics
3861  */
3862 bfa_status_t
3863 bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
3864 {
3865 	/* Meaningful only for FCoE mode */
3866 	bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
3867 
3868 	return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
3869 }
3870 
3871 void
3872 bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
3873 {
3874 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3875 
3876 	qos_attr->state = fcport->qos_attr.state;
3877 	qos_attr->total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr);
3878 }
3879 
3880 void
3881 bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
3882 	struct bfa_qos_vc_attr_s *qos_vc_attr)
3883 {
3884 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3885 	struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
3886 	u32 i = 0;
3887 
3888 	qos_vc_attr->total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
3889 	qos_vc_attr->shared_credit  = be16_to_cpu(bfa_vc_attr->shared_credit);
3890 	qos_vc_attr->elp_opmode_flags  =
3891 			be32_to_cpu(bfa_vc_attr->elp_opmode_flags);
3892 
3893 	/* Individual VC info */
3894 	while (i < qos_vc_attr->total_vc_count) {
3895 		qos_vc_attr->vc_info[i].vc_credit	=
3896 				bfa_vc_attr->vc_info[i].vc_credit;
3897 		qos_vc_attr->vc_info[i].borrow_credit	=
3898 				bfa_vc_attr->vc_info[i].borrow_credit;
3899 		qos_vc_attr->vc_info[i].priority	=
3900 				bfa_vc_attr->vc_info[i].priority;
3901 		++i;
3902 	}
3903 }
3904 
3905 /*
3906  * Return whether the port is in the disabled state.
3907  */
3908 bfa_boolean_t
3909 bfa_fcport_is_disabled(struct bfa_s *bfa)
3910 {
3911 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3912 
3913 	return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
3914 		BFA_PORT_ST_DISABLED;
3916 }
3917 
3918 bfa_boolean_t
3919 bfa_fcport_is_ratelim(struct bfa_s *bfa)
3920 {
3921 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3922 
3923 	return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
3925 }
3926 
3927 void
3928 bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
3929 {
3930 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3931 	enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
3932 
3933 	bfa_trc(bfa, on_off);
3934 	bfa_trc(bfa, fcport->cfg.qos_enabled);
3935 
3936 	bfa_trc(bfa, ioc_type);
3937 
3938 	if (ioc_type == BFA_IOC_TYPE_FC) {
3939 		fcport->cfg.qos_enabled = on_off;
3940 		/*
3941 		 * Notify fcpim of the change in QoS state
3942 		 */
3943 		bfa_fcpim_update_ioredirect(bfa);
3944 	}
3945 }
3946 
3947 void
3948 bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
3949 {
3950 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3951 
3952 	bfa_trc(bfa, on_off);
3953 	bfa_trc(bfa, fcport->cfg.ratelimit);
3954 
3955 	fcport->cfg.ratelimit = on_off;
3956 	if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
3957 		fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
3958 }
3959 
3960 /*
3961  * Configure default minimum ratelim speed
3962  */
3963 bfa_status_t
3964 bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3965 {
3966 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3967 
3968 	bfa_trc(bfa, speed);
3969 
3970 	/* Auto and speeds greater than the supported speed are invalid */
3971 	if ((speed == BFA_PORT_SPEED_AUTO) || (speed > fcport->speed_sup)) {
3972 		bfa_trc(bfa, fcport->speed_sup);
3973 		return BFA_STATUS_UNSUPP_SPEED;
3974 	}
3975 
3976 	fcport->cfg.trl_def_speed = speed;
3977 
3978 	return BFA_STATUS_OK;
3979 }
3980 
3981 /*
3982  * Get default minimum ratelim speed
3983  */
3984 enum bfa_port_speed
3985 bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
3986 {
3987 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3988 
3989 	bfa_trc(bfa, fcport->cfg.trl_def_speed);
3990 	return fcport->cfg.trl_def_speed;
3991 }
3992 
3993 void
3994 bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status)
3995 {
3996 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3997 
3998 	bfa_trc(bfa, status);
3999 	bfa_trc(bfa, fcport->diag_busy);
4000 
4001 	fcport->diag_busy = status;
4002 }
4003 
4004 void
4005 bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
4006 	bfa_boolean_t link_e2e_beacon)
4007 {
4008 	struct bfa_s *bfa = dev;
4009 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4010 
4011 	bfa_trc(bfa, beacon);
4012 	bfa_trc(bfa, link_e2e_beacon);
4013 	bfa_trc(bfa, fcport->beacon);
4014 	bfa_trc(bfa, fcport->link_e2e_beacon);
4015 
4016 	fcport->beacon = beacon;
4017 	fcport->link_e2e_beacon = link_e2e_beacon;
4018 }
4019 
4020 bfa_boolean_t
4021 bfa_fcport_is_linkup(struct bfa_s *bfa)
4022 {
4023 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4024 
4025 	return	(!fcport->cfg.trunked &&
4026 		 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
4027 		(fcport->cfg.trunked &&
4028 		 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
4029 }
4030 
4031 bfa_boolean_t
4032 bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
4033 {
4034 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4035 
4036 	return fcport->cfg.qos_enabled;
4037 }
4038 
4039 bfa_status_t
4040 bfa_trunk_get_attr(struct bfa_s *bfa, struct bfa_trunk_attr_s *attr)
4042 {
4043 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4044 	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
4045 
4046 	bfa_trc(bfa, fcport->cfg.trunked);
4047 	bfa_trc(bfa, trunk->attr.state);
4048 	*attr = trunk->attr;
4049 	attr->port_id = bfa_lps_get_base_pid(bfa);
4050 
4051 	return BFA_STATUS_OK;
4052 }
4053 
4054 void
4055 bfa_trunk_enable_cfg(struct bfa_s *bfa)
4056 {
4057 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4058 	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
4059 
4060 	bfa_trc(bfa, 1);
4061 	trunk->attr.state = BFA_TRUNK_OFFLINE;
4062 	fcport->cfg.trunked = BFA_TRUE;
4063 }
4064 
4065 bfa_status_t
4066 bfa_trunk_enable(struct bfa_s *bfa)
4067 {
4068 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4069 	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
4070 
4071 	bfa_trc(bfa, 1);
4072 
4073 	trunk->attr.state   = BFA_TRUNK_OFFLINE;
4074 	bfa_fcport_disable(bfa);
4075 	fcport->cfg.trunked = BFA_TRUE;
4076 	bfa_fcport_enable(bfa);
4077 
4078 	return BFA_STATUS_OK;
4079 }
4080 
4081 bfa_status_t
4082 bfa_trunk_disable(struct bfa_s *bfa)
4083 {
4084 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4085 	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
4086 
4087 	bfa_trc(bfa, 0);
4088 	trunk->attr.state   = BFA_TRUNK_DISABLED;
4089 	bfa_fcport_disable(bfa);
4090 	fcport->cfg.trunked = BFA_FALSE;
4091 	bfa_fcport_enable(bfa);
4092 	return BFA_STATUS_OK;
4093 }
4094 
4095 
4096 /*
4097  * Rport State machine functions
4098  */
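/*
 * Each state below is a function: bfa_sm_set_state() stores the function
 * pointer and bfa_sm_send_event() calls through it, so a state transition
 * is a single assignment.  A minimal sketch of the idiom (simplified from
 * the bfa_cs.h helpers; illustrative only):
 *
 *	#define bfa_sm_set_state(_sm, _state)	((_sm)->sm = (bfa_sm_t)(_state))
 *	#define bfa_sm_send_event(_sm, _event)	((_sm)->sm((_sm), (_event)))
 *
 * Events a state does not handle fall into its default: arm and are
 * reported through bfa_sm_fault().
 */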
4099 /*
4100  * Beginning state, only online event expected.
4101  */
4102 static void
4103 bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
4104 {
4105 	bfa_trc(rp->bfa, rp->rport_tag);
4106 	bfa_trc(rp->bfa, event);
4107 
4108 	switch (event) {
4109 	case BFA_RPORT_SM_CREATE:
4110 		bfa_stats(rp, sm_un_cr);
4111 		bfa_sm_set_state(rp, bfa_rport_sm_created);
4112 		break;
4113 
4114 	default:
4115 		bfa_stats(rp, sm_un_unexp);
4116 		bfa_sm_fault(rp->bfa, event);
4117 	}
4118 }
4119 
4120 static void
4121 bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
4122 {
4123 	bfa_trc(rp->bfa, rp->rport_tag);
4124 	bfa_trc(rp->bfa, event);
4125 
4126 	switch (event) {
4127 	case BFA_RPORT_SM_ONLINE:
4128 		bfa_stats(rp, sm_cr_on);
4129 		if (bfa_rport_send_fwcreate(rp))
4130 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4131 		else
4132 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4133 		break;
4134 
4135 	case BFA_RPORT_SM_DELETE:
4136 		bfa_stats(rp, sm_cr_del);
4137 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4138 		bfa_rport_free(rp);
4139 		break;
4140 
4141 	case BFA_RPORT_SM_HWFAIL:
4142 		bfa_stats(rp, sm_cr_hwf);
4143 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4144 		break;
4145 
4146 	default:
4147 		bfa_stats(rp, sm_cr_unexp);
4148 		bfa_sm_fault(rp->bfa, event);
4149 	}
4150 }
4151 
4152 /*
4153  * Waiting for rport create response from firmware.
4154  */
4155 static void
4156 bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
4157 {
4158 	bfa_trc(rp->bfa, rp->rport_tag);
4159 	bfa_trc(rp->bfa, event);
4160 
4161 	switch (event) {
4162 	case BFA_RPORT_SM_FWRSP:
4163 		bfa_stats(rp, sm_fwc_rsp);
4164 		bfa_sm_set_state(rp, bfa_rport_sm_online);
4165 		bfa_rport_online_cb(rp);
4166 		break;
4167 
4168 	case BFA_RPORT_SM_DELETE:
4169 		bfa_stats(rp, sm_fwc_del);
4170 		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
4171 		break;
4172 
4173 	case BFA_RPORT_SM_OFFLINE:
4174 		bfa_stats(rp, sm_fwc_off);
4175 		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
4176 		break;
4177 
4178 	case BFA_RPORT_SM_HWFAIL:
4179 		bfa_stats(rp, sm_fwc_hwf);
4180 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4181 		break;
4182 
4183 	default:
4184 		bfa_stats(rp, sm_fwc_unexp);
4185 		bfa_sm_fault(rp->bfa, event);
4186 	}
4187 }
4188 
4189 /*
4190  * Request queue is full, awaiting queue resume to send create request.
4191  */
4192 static void
4193 bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4194 {
4195 	bfa_trc(rp->bfa, rp->rport_tag);
4196 	bfa_trc(rp->bfa, event);
4197 
4198 	switch (event) {
4199 	case BFA_RPORT_SM_QRESUME:
4200 		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4201 		bfa_rport_send_fwcreate(rp);
4202 		break;
4203 
4204 	case BFA_RPORT_SM_DELETE:
4205 		bfa_stats(rp, sm_fwc_del);
4206 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4207 		bfa_reqq_wcancel(&rp->reqq_wait);
4208 		bfa_rport_free(rp);
4209 		break;
4210 
4211 	case BFA_RPORT_SM_OFFLINE:
4212 		bfa_stats(rp, sm_fwc_off);
4213 		bfa_sm_set_state(rp, bfa_rport_sm_offline);
4214 		bfa_reqq_wcancel(&rp->reqq_wait);
4215 		bfa_rport_offline_cb(rp);
4216 		break;
4217 
4218 	case BFA_RPORT_SM_HWFAIL:
4219 		bfa_stats(rp, sm_fwc_hwf);
4220 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4221 		bfa_reqq_wcancel(&rp->reqq_wait);
4222 		break;
4223 
4224 	default:
4225 		bfa_stats(rp, sm_fwc_unexp);
4226 		bfa_sm_fault(rp->bfa, event);
4227 	}
4228 }
4229 
4230 /*
4231  * Online state - normal parking state.
4232  */
4233 static void
4234 bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
4235 {
4236 	struct bfi_rport_qos_scn_s *qos_scn;
4237 
4238 	bfa_trc(rp->bfa, rp->rport_tag);
4239 	bfa_trc(rp->bfa, event);
4240 
4241 	switch (event) {
4242 	case BFA_RPORT_SM_OFFLINE:
4243 		bfa_stats(rp, sm_on_off);
4244 		if (bfa_rport_send_fwdelete(rp))
4245 			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4246 		else
4247 			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
4248 		break;
4249 
4250 	case BFA_RPORT_SM_DELETE:
4251 		bfa_stats(rp, sm_on_del);
4252 		if (bfa_rport_send_fwdelete(rp))
4253 			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4254 		else
4255 			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4256 		break;
4257 
4258 	case BFA_RPORT_SM_HWFAIL:
4259 		bfa_stats(rp, sm_on_hwf);
4260 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4261 		break;
4262 
4263 	case BFA_RPORT_SM_SET_SPEED:
4264 		bfa_rport_send_fwspeed(rp);
4265 		break;
4266 
4267 	case BFA_RPORT_SM_QOS_SCN:
4268 		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
4269 		rp->qos_attr = qos_scn->new_qos_attr;
4270 		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
4271 		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
4272 		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
4273 		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);
4274 
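		/*
		 * flow ids arrive in firmware (big endian) byte order;
		 * convert them before the comparisons below
		 */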
4275 		qos_scn->old_qos_attr.qos_flow_id  =
4276 			be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
4277 		qos_scn->new_qos_attr.qos_flow_id  =
4278 			be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);
4279 
4280 		if (qos_scn->old_qos_attr.qos_flow_id !=
4281 			qos_scn->new_qos_attr.qos_flow_id)
4282 			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
4283 						    qos_scn->old_qos_attr,
4284 						    qos_scn->new_qos_attr);
4285 		if (qos_scn->old_qos_attr.qos_priority !=
4286 			qos_scn->new_qos_attr.qos_priority)
4287 			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
4288 						  qos_scn->old_qos_attr,
4289 						  qos_scn->new_qos_attr);
4290 		break;
4291 
4292 	default:
4293 		bfa_stats(rp, sm_on_unexp);
4294 		bfa_sm_fault(rp->bfa, event);
4295 	}
4296 }
4297 
4298 /*
4299  * Firmware rport is being deleted - awaiting f/w response.
4300  */
4301 static void
4302 bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
4303 {
4304 	bfa_trc(rp->bfa, rp->rport_tag);
4305 	bfa_trc(rp->bfa, event);
4306 
4307 	switch (event) {
4308 	case BFA_RPORT_SM_FWRSP:
4309 		bfa_stats(rp, sm_fwd_rsp);
4310 		bfa_sm_set_state(rp, bfa_rport_sm_offline);
4311 		bfa_rport_offline_cb(rp);
4312 		break;
4313 
4314 	case BFA_RPORT_SM_DELETE:
4315 		bfa_stats(rp, sm_fwd_del);
4316 		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4317 		break;
4318 
4319 	case BFA_RPORT_SM_HWFAIL:
4320 		bfa_stats(rp, sm_fwd_hwf);
4321 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4322 		bfa_rport_offline_cb(rp);
4323 		break;
4324 
4325 	default:
4326 		bfa_stats(rp, sm_fwd_unexp);
4327 		bfa_sm_fault(rp->bfa, event);
4328 	}
4329 }
4330 
4331 static void
4332 bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4333 {
4334 	bfa_trc(rp->bfa, rp->rport_tag);
4335 	bfa_trc(rp->bfa, event);
4336 
4337 	switch (event) {
4338 	case BFA_RPORT_SM_QRESUME:
4339 		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4340 		bfa_rport_send_fwdelete(rp);
4341 		break;
4342 
4343 	case BFA_RPORT_SM_DELETE:
4344 		bfa_stats(rp, sm_fwd_del);
4345 		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4346 		break;
4347 
4348 	case BFA_RPORT_SM_HWFAIL:
4349 		bfa_stats(rp, sm_fwd_hwf);
4350 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4351 		bfa_reqq_wcancel(&rp->reqq_wait);
4352 		bfa_rport_offline_cb(rp);
4353 		break;
4354 
4355 	default:
4356 		bfa_stats(rp, sm_fwd_unexp);
4357 		bfa_sm_fault(rp->bfa, event);
4358 	}
4359 }
4360 
4361 /*
4362  * Offline state.
4363  */
4364 static void
4365 bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
4366 {
4367 	bfa_trc(rp->bfa, rp->rport_tag);
4368 	bfa_trc(rp->bfa, event);
4369 
4370 	switch (event) {
4371 	case BFA_RPORT_SM_DELETE:
4372 		bfa_stats(rp, sm_off_del);
4373 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4374 		bfa_rport_free(rp);
4375 		break;
4376 
4377 	case BFA_RPORT_SM_ONLINE:
4378 		bfa_stats(rp, sm_off_on);
4379 		if (bfa_rport_send_fwcreate(rp))
4380 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4381 		else
4382 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4383 		break;
4384 
4385 	case BFA_RPORT_SM_HWFAIL:
4386 		bfa_stats(rp, sm_off_hwf);
4387 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4388 		break;
4389 
4390 	default:
4391 		bfa_stats(rp, sm_off_unexp);
4392 		bfa_sm_fault(rp->bfa, event);
4393 	}
4394 }
4395 
4396 /*
4397  * Rport is deleted, waiting for firmware response to delete.
4398  */
4399 static void
4400 bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
4401 {
4402 	bfa_trc(rp->bfa, rp->rport_tag);
4403 	bfa_trc(rp->bfa, event);
4404 
4405 	switch (event) {
4406 	case BFA_RPORT_SM_FWRSP:
4407 		bfa_stats(rp, sm_del_fwrsp);
4408 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4409 		bfa_rport_free(rp);
4410 		break;
4411 
4412 	case BFA_RPORT_SM_HWFAIL:
4413 		bfa_stats(rp, sm_del_hwf);
4414 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4415 		bfa_rport_free(rp);
4416 		break;
4417 
4418 	default:
4419 		bfa_sm_fault(rp->bfa, event);
4420 	}
4421 }
4422 
4423 static void
4424 bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4425 {
4426 	bfa_trc(rp->bfa, rp->rport_tag);
4427 	bfa_trc(rp->bfa, event);
4428 
4429 	switch (event) {
4430 	case BFA_RPORT_SM_QRESUME:
4431 		bfa_stats(rp, sm_del_fwrsp);
4432 		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4433 		bfa_rport_send_fwdelete(rp);
4434 		break;
4435 
4436 	case BFA_RPORT_SM_HWFAIL:
4437 		bfa_stats(rp, sm_del_hwf);
4438 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4439 		bfa_reqq_wcancel(&rp->reqq_wait);
4440 		bfa_rport_free(rp);
4441 		break;
4442 
4443 	default:
4444 		bfa_sm_fault(rp->bfa, event);
4445 	}
4446 }
4447 
4448 /*
4449  * Waiting for rport create response from firmware. A delete is pending.
4450  */
4451 static void
4452 bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
4453 				enum bfa_rport_event event)
4454 {
4455 	bfa_trc(rp->bfa, rp->rport_tag);
4456 	bfa_trc(rp->bfa, event);
4457 
4458 	switch (event) {
4459 	case BFA_RPORT_SM_FWRSP:
4460 		bfa_stats(rp, sm_delp_fwrsp);
4461 		if (bfa_rport_send_fwdelete(rp))
4462 			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4463 		else
4464 			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4465 		break;
4466 
4467 	case BFA_RPORT_SM_HWFAIL:
4468 		bfa_stats(rp, sm_delp_hwf);
4469 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4470 		bfa_rport_free(rp);
4471 		break;
4472 
4473 	default:
4474 		bfa_stats(rp, sm_delp_unexp);
4475 		bfa_sm_fault(rp->bfa, event);
4476 	}
4477 }
4478 
4479 /*
4480  * Waiting for rport create response from firmware. Rport offline is pending.
4481  */
4482 static void
4483 bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
4484 				 enum bfa_rport_event event)
4485 {
4486 	bfa_trc(rp->bfa, rp->rport_tag);
4487 	bfa_trc(rp->bfa, event);
4488 
4489 	switch (event) {
4490 	case BFA_RPORT_SM_FWRSP:
4491 		bfa_stats(rp, sm_offp_fwrsp);
4492 		if (bfa_rport_send_fwdelete(rp))
4493 			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4494 		else
4495 			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
4496 		break;
4497 
4498 	case BFA_RPORT_SM_DELETE:
4499 		bfa_stats(rp, sm_offp_del);
4500 		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
4501 		break;
4502 
4503 	case BFA_RPORT_SM_HWFAIL:
4504 		bfa_stats(rp, sm_offp_hwf);
4505 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4506 		break;
4507 
4508 	default:
4509 		bfa_stats(rp, sm_offp_unexp);
4510 		bfa_sm_fault(rp->bfa, event);
4511 	}
4512 }
4513 
4514 /*
4515  * IOC h/w failed.
4516  */
4517 static void
4518 bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
4519 {
4520 	bfa_trc(rp->bfa, rp->rport_tag);
4521 	bfa_trc(rp->bfa, event);
4522 
4523 	switch (event) {
4524 	case BFA_RPORT_SM_OFFLINE:
4525 		bfa_stats(rp, sm_iocd_off);
4526 		bfa_rport_offline_cb(rp);
4527 		break;
4528 
4529 	case BFA_RPORT_SM_DELETE:
4530 		bfa_stats(rp, sm_iocd_del);
4531 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4532 		bfa_rport_free(rp);
4533 		break;
4534 
4535 	case BFA_RPORT_SM_ONLINE:
4536 		bfa_stats(rp, sm_iocd_on);
4537 		if (bfa_rport_send_fwcreate(rp))
4538 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4539 		else
4540 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4541 		break;
4542 
4543 	case BFA_RPORT_SM_HWFAIL:
4544 		break;
4545 
4546 	default:
4547 		bfa_stats(rp, sm_iocd_unexp);
4548 		bfa_sm_fault(rp->bfa, event);
4549 	}
4550 }
4551 
4552 
4553 
4554 /*
4555  *  bfa_rport_private BFA rport private functions
4556  */
4557 
4558 static void
4559 __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4560 {
4561 	struct bfa_rport_s *rp = cbarg;
4562 
4563 	if (complete)
4564 		bfa_cb_rport_online(rp->rport_drv);
4565 }
4566 
4567 static void
4568 __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4569 {
4570 	struct bfa_rport_s *rp = cbarg;
4571 
4572 	if (complete)
4573 		bfa_cb_rport_offline(rp->rport_drv);
4574 }
4575 
4576 static void
4577 bfa_rport_qresume(void *cbarg)
4578 {
4579 	struct bfa_rport_s	*rp = cbarg;
4580 
4581 	bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
4582 }
4583 
4584 static void
4585 bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
4586 		u32 *dm_len)
4587 {
4588 	if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
4589 		cfg->fwcfg.num_rports = BFA_RPORT_MIN;
4590 
4591 	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s);
4592 }
4593 
4594 static void
4595 bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4596 		     struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
4597 {
4598 	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4599 	struct bfa_rport_s *rp;
4600 	u16 i;
4601 
4602 	INIT_LIST_HEAD(&mod->rp_free_q);
4603 	INIT_LIST_HEAD(&mod->rp_active_q);
4604 
4605 	rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo);
4606 	mod->rps_list = rp;
4607 	mod->num_rports = cfg->fwcfg.num_rports;
4608 
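	/* num_rports must be a nonzero power of two */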
4609 	bfa_assert(mod->num_rports &&
4610 		   !(mod->num_rports & (mod->num_rports - 1)));
4611 
4612 	for (i = 0; i < mod->num_rports; i++, rp++) {
4613 		memset(rp, 0, sizeof(struct bfa_rport_s));
4614 		rp->bfa = bfa;
4615 		rp->rport_tag = i;
4616 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4617 
4618 		/*
4619 		 * rport tag 0 is reserved and is never placed on the free queue
4620 		 */
4621 		if (i)
4622 			list_add_tail(&rp->qe, &mod->rp_free_q);
4623 
4624 		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
4625 	}
4626 
4627 	/*
4628 	 * consume memory
4629 	 */
4630 	bfa_meminfo_kva(meminfo) = (u8 *) rp;
4631 }
4632 
4633 static void
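/*
 * bfa_rport_detach/start/stop have no rport-specific work to do; they are
 * kept as empty hooks to satisfy the module framework (BFA_MODULE).
 */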
4634 bfa_rport_detach(struct bfa_s *bfa)
4635 {
4636 }
4637 
4638 static void
4639 bfa_rport_start(struct bfa_s *bfa)
4640 {
4641 }
4642 
4643 static void
4644 bfa_rport_stop(struct bfa_s *bfa)
4645 {
4646 }
4647 
4648 static void
4649 bfa_rport_iocdisable(struct bfa_s *bfa)
4650 {
4651 	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4652 	struct bfa_rport_s *rport;
4653 	struct list_head *qe, *qen;
4654 
4655 	list_for_each_safe(qe, qen, &mod->rp_active_q) {
4656 		rport = (struct bfa_rport_s *) qe;
4657 		bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
4658 	}
4659 }
4660 
4661 static struct bfa_rport_s *
4662 bfa_rport_alloc(struct bfa_rport_mod_s *mod)
4663 {
4664 	struct bfa_rport_s *rport;
4665 
4666 	bfa_q_deq(&mod->rp_free_q, &rport);
4667 	if (rport)
4668 		list_add_tail(&rport->qe, &mod->rp_active_q);
4669 
4670 	return rport;
4671 }
4672 
4673 static void
4674 bfa_rport_free(struct bfa_rport_s *rport)
4675 {
4676 	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
4677 
4678 	bfa_assert(bfa_q_is_on_q(&mod->rp_active_q, rport));
4679 	list_del(&rport->qe);
4680 	list_add_tail(&rport->qe, &mod->rp_free_q);
4681 }
4682 
4683 static bfa_boolean_t
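/*
 * Request queue handshake used by the send_fw* helpers below: grab a
 * request element with bfa_reqq_next(); if the queue is full, either park
 * on the queue's wait list with bfa_reqq_wait() (create/delete) or drop
 * the request (set speed) and return BFA_FALSE.  When space frees up, the
 * wait callback (bfa_rport_qresume) raises BFA_RPORT_SM_QRESUME so the
 * state machine can retry.  A built message is committed to the firmware
 * with bfa_reqq_produce().
 */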
4684 bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
4685 {
4686 	struct bfi_rport_create_req_s *m;
4687 
4688 	/*
4689 	 * check for room in queue to send request now
4690 	 */
4691 	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4692 	if (!m) {
4693 		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
4694 		return BFA_FALSE;
4695 	}
4696 
4697 	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
4698 			bfa_lpuid(rp->bfa));
4699 	m->bfa_handle = rp->rport_tag;
4700 	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
4701 	m->pid = rp->rport_info.pid;
4702 	m->lp_tag = rp->rport_info.lp_tag;
4703 	m->local_pid = rp->rport_info.local_pid;
4704 	m->fc_class = rp->rport_info.fc_class;
4705 	m->vf_en = rp->rport_info.vf_en;
4706 	m->vf_id = rp->rport_info.vf_id;
4707 	m->cisc = rp->rport_info.cisc;
4708 
4709 	/*
4710 	 * queue I/O message to firmware
4711 	 */
4712 	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
4713 	return BFA_TRUE;
4714 }
4715 
4716 static bfa_boolean_t
4717 bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
4718 {
4719 	struct bfi_rport_delete_req_s *m;
4720 
4721 	/*
4722 	 * check for room in queue to send request now
4723 	 */
4724 	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4725 	if (!m) {
4726 		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
4727 		return BFA_FALSE;
4728 	}
4729 
4730 	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
4731 			bfa_lpuid(rp->bfa));
4732 	m->fw_handle = rp->fw_handle;
4733 
4734 	/*
4735 	 * queue I/O message to firmware
4736 	 */
4737 	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
4738 	return BFA_TRUE;
4739 }
4740 
4741 static bfa_boolean_t
4742 bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
4743 {
4744 	struct bfa_rport_speed_req_s *m;
4745 
4746 	/*
4747 	 * check for room in queue to send request now
4748 	 */
4749 	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4750 	if (!m) {
4751 		bfa_trc(rp->bfa, rp->rport_info.speed);
4752 		return BFA_FALSE;
4753 	}
4754 
4755 	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
4756 			bfa_lpuid(rp->bfa));
4757 	m->fw_handle = rp->fw_handle;
4758 	m->speed = (u8)rp->rport_info.speed;
4759 
4760 	/*
4761 	 * queue I/O message to firmware
4762 	 */
4763 	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
4764 	return BFA_TRUE;
4765 }
4766 
4767 
4768 
4769 /*
4770  *  bfa_rport_public
4771  */
4772 
4773 /*
4774  * Rport interrupt processing.
4775  */
4776 void
4777 bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
4778 {
4779 	union bfi_rport_i2h_msg_u msg;
4780 	struct bfa_rport_s *rp;
4781 
4782 	bfa_trc(bfa, m->mhdr.msg_id);
4783 
4784 	msg.msg = m;
4785 
4786 	switch (m->mhdr.msg_id) {
4787 	case BFI_RPORT_I2H_CREATE_RSP:
4788 		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
4789 		rp->fw_handle = msg.create_rsp->fw_handle;
4790 		rp->qos_attr = msg.create_rsp->qos_attr;
4791 		bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
4792 		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4793 		break;
4794 
4795 	case BFI_RPORT_I2H_DELETE_RSP:
4796 		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
4797 		bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
4798 		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4799 		break;
4800 
4801 	case BFI_RPORT_I2H_QOS_SCN:
4802 		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
4803 		rp->event_arg.fw_msg = msg.qos_scn_evt;
4804 		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
4805 		break;
4806 
4807 	default:
4808 		bfa_trc(bfa, m->mhdr.msg_id);
4809 		bfa_assert(0);
4810 	}
4811 }
4812 
4813 
4814 
4815 /*
4816  *  bfa_rport_api
4817  */
4818 
4819 struct bfa_rport_s *
4820 bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
4821 {
4822 	struct bfa_rport_s *rp;
4823 
4824 	rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
4825 
4826 	if (rp == NULL)
4827 		return NULL;
4828 
4829 	rp->bfa = bfa;
4830 	rp->rport_drv = rport_drv;
4831 	bfa_rport_clear_stats(rp);
4832 
4833 	bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
4834 	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
4835 
4836 	return rp;
4837 }
4838 
4839 void
4840 bfa_rport_delete(struct bfa_rport_s *rport)
4841 {
4842 	bfa_sm_send_event(rport, BFA_RPORT_SM_DELETE);
4843 }
4844 
4845 void
4846 bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
4847 {
4848 	bfa_assert(rport_info->max_frmsz != 0);
4849 
4850 	/*
4851 	 * Some JBODs do not set the PDU size correctly in their PLOGI
4852 	 * responses. Default to the minimum size.
4853 	 */
4854 	if (rport_info->max_frmsz == 0) {
4855 		bfa_trc(rport->bfa, rport->rport_tag);
4856 		rport_info->max_frmsz = FC_MIN_PDUSZ;
4857 	}
4858 
4859 	rport->rport_info = *rport_info;
4860 	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
4861 }
4862 
4863 void
4864 bfa_rport_offline(struct bfa_rport_s *rport)
4865 {
4866 	bfa_sm_send_event(rport, BFA_RPORT_SM_OFFLINE);
4867 }
4868 
4869 void
4870 bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
4871 {
4872 	bfa_assert(speed != 0);
4873 	bfa_assert(speed != BFA_PORT_SPEED_AUTO);
4874 
4875 	rport->rport_info.speed = speed;
4876 	bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
4877 }
4878 
4879 void
4880 bfa_rport_get_stats(struct bfa_rport_s *rport,
4881 	struct bfa_rport_hal_stats_s *stats)
4882 {
4883 	*stats = rport->stats;
4884 }
4885 
4886 void
4887 bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
4888 					struct bfa_rport_qos_attr_s *qos_attr)
4889 {
4890 	qos_attr->qos_priority  = rport->qos_attr.qos_priority;
4891 	qos_attr->qos_flow_id  = be32_to_cpu(rport->qos_attr.qos_flow_id);
4893 }
4894 
4895 void
4896 bfa_rport_clear_stats(struct bfa_rport_s *rport)
4897 {
4898 	memset(&rport->stats, 0, sizeof(rport->stats));
4899 }
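
/*
 * Typical caller sequence for this API (an illustrative sketch; the real
 * callers live in the FCS layer and error handling is elided):
 *
 *	struct bfa_rport_s *rp = bfa_rport_create(bfa, rport_drv);
 *	if (rp) {
 *		bfa_rport_online(rp, &rport_info);
 *		...
 *		bfa_rport_offline(rp);
 *		bfa_rport_delete(rp);
 *	}
 */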
4900 
4901 
4902 /*
4903  * SGPG related functions
4904  */
4905 
4906 /*
4907  * Compute and return memory needed by FCP(im) module.
4908  */
4909 static void
4910 bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
4911 		u32 *dm_len)
4912 {
4913 	if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
4914 		cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
4915 
4916 	*km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s);
4917 	*dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s);
4918 }
4919 
4920 
4921 static void
4922 bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4923 		    struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev)
4924 {
4925 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4926 	int i;
4927 	struct bfa_sgpg_s *hsgpg;
4928 	struct bfi_sgpg_s *sgpg;
4929 	u64 align_len;
4930 
4931 	union {
4932 		u64 pa;
4933 		union bfi_addr_u addr;
4934 	} sgpg_pa, sgpg_pa_tmp;
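	/* overlay so one physical address can be written out as a bfi_addr_u */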
4935 
4936 	INIT_LIST_HEAD(&mod->sgpg_q);
4937 	INIT_LIST_HEAD(&mod->sgpg_wait_q);
4938 
4939 	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);
4940 
4941 	mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
4942 	mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo);
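	/*
	 * round the array base up to an SG page boundary and apply the same
	 * offset to the kva and dma-virtual views so all three stay congruent
	 */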
4943 	align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa);
4944 	mod->sgpg_arr_pa += align_len;
4945 	mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) +
4946 						align_len);
4947 	mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) +
4948 						align_len);
4949 
4950 	hsgpg = mod->hsgpg_arr;
4951 	sgpg = mod->sgpg_arr;
4952 	sgpg_pa.pa = mod->sgpg_arr_pa;
4953 	mod->free_sgpgs = mod->num_sgpgs;
4954 
4955 	bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)));
4956 
4957 	for (i = 0; i < mod->num_sgpgs; i++) {
4958 		memset(hsgpg, 0, sizeof(*hsgpg));
4959 		memset(sgpg, 0, sizeof(*sgpg));
4960 
4961 		hsgpg->sgpg = sgpg;
4962 		sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
4963 		hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
4964 		list_add_tail(&hsgpg->qe, &mod->sgpg_q);
4965 
4966 		hsgpg++;
4967 		sgpg++;
4968 		sgpg_pa.pa += sizeof(struct bfi_sgpg_s);
4969 	}
4970 
4971 	bfa_meminfo_kva(minfo) = (u8 *) hsgpg;
4972 	bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg;
4973 	bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa;
4974 }
4975 
4976 static void
4977 bfa_sgpg_detach(struct bfa_s *bfa)
4978 {
4979 }
4980 
4981 static void
4982 bfa_sgpg_start(struct bfa_s *bfa)
4983 {
4984 }
4985 
4986 static void
4987 bfa_sgpg_stop(struct bfa_s *bfa)
4988 {
4989 }
4990 
4991 static void
4992 bfa_sgpg_iocdisable(struct bfa_s *bfa)
4993 {
4994 }
4995 
4996 
4997 
4998 /*
4999  *  hal_sgpg_public BFA SGPG public functions
5000  */
5001 
5002 bfa_status_t
5003 bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
5004 {
5005 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5006 	struct bfa_sgpg_s *hsgpg;
5007 	int i;
5008 
5009 	bfa_trc_fp(bfa, nsgpgs);
5010 
5011 	if (mod->free_sgpgs < nsgpgs)
5012 		return BFA_STATUS_ENOMEM;
5013 
5014 	for (i = 0; i < nsgpgs; i++) {
5015 		bfa_q_deq(&mod->sgpg_q, &hsgpg);
5016 		bfa_assert(hsgpg);
5017 		list_add_tail(&hsgpg->qe, sgpg_q);
5018 	}
5019 
5020 	mod->free_sgpgs -= nsgpgs;
5021 	return BFA_STATUS_OK;
5022 }
5023 
5024 void
5025 bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
5026 {
5027 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5028 	struct bfa_sgpg_wqe_s *wqe;
5029 
5030 	bfa_trc_fp(bfa, nsgpg);
5031 
5032 	mod->free_sgpgs += nsgpg;
5033 	bfa_assert(mod->free_sgpgs <= mod->num_sgpgs);
5034 
5035 	list_splice_tail_init(sgpg_q, &mod->sgpg_q);
5036 
5037 	if (list_empty(&mod->sgpg_wait_q))
5038 		return;
5039 
5040 	/*
5041 	 * satisfy as many waiting requests as possible
5042 	 */
5043 	do {
5044 		wqe = bfa_q_first(&mod->sgpg_wait_q);
5045 		if (mod->free_sgpgs < wqe->nsgpg)
5046 			nsgpg = mod->free_sgpgs;
5047 		else
5048 			nsgpg = wqe->nsgpg;
5049 		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
5050 		wqe->nsgpg -= nsgpg;
5051 		if (wqe->nsgpg == 0) {
5052 			list_del(&wqe->qe);
5053 			wqe->cbfn(wqe->cbarg);
5054 		}
5055 	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
5056 }
5057 
5058 void
5059 bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
5060 {
5061 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5062 
5063 	bfa_assert(nsgpg > 0);
5064 	bfa_assert(nsgpg > mod->free_sgpgs);
5065 
5066 	wqe->nsgpg_total = wqe->nsgpg = nsgpg;
5067 
5068 	/*
5069 	 * hand any remaining free pages to this request first
5070 	 */
5071 	if (mod->free_sgpgs) {
5072 		/*
5073 		 * no one else is waiting for SGPG
5074 		 */
5075 		bfa_assert(list_empty(&mod->sgpg_wait_q));
5076 		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
5077 		wqe->nsgpg -= mod->free_sgpgs;
5078 		mod->free_sgpgs = 0;
5079 	}
5080 
5081 	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
5082 }
5083 
5084 void
5085 bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
5086 {
5087 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5088 
5089 	bfa_assert(bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
5090 	list_del(&wqe->qe);
5091 
5092 	if (wqe->nsgpg_total != wqe->nsgpg)
5093 		bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
5094 				   wqe->nsgpg_total - wqe->nsgpg);
5095 }
5096 
5097 void
5098 bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
5099 		   void *cbarg)
5100 {
5101 	INIT_LIST_HEAD(&wqe->sgpg_q);
5102 	wqe->cbfn = cbfn;
5103 	wqe->cbarg = cbarg;
5104 }
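
/*
 * Illustrative caller pattern for the SGPG pool (a sketch with placeholder
 * names, not lifted from a real caller): allocate when pages are free,
 * otherwise queue a wait entry that is called back as pages are returned.
 *
 *	if (bfa_sgpg_malloc(bfa, &my_sgpg_q, n) == BFA_STATUS_ENOMEM) {
 *		bfa_sgpg_winit(&my_wqe, my_resume_fn, my_arg);
 *		bfa_sgpg_wait(bfa, &my_wqe, n);
 *	}
 *
 * A waiter that gives up must call bfa_sgpg_wcancel(), which also returns
 * any partially granted pages to the pool.
 */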
5105 
5106 /*
5107  *  UF related functions
5108  */
5109 /*
5110  *****************************************************************************
5111  * Internal functions
5112  *****************************************************************************
5113  */
5114 static void
5115 __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
5116 {
5117 	struct bfa_uf_s   *uf = cbarg;
5118 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
5119 
5120 	if (complete)
5121 		ufm->ufrecv(ufm->cbarg, uf);
5122 }
5123 
5124 static void
5125 claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
5126 {
5127 	u32 uf_pb_tot_sz;
5128 
5129 	ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi);
5130 	ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi);
5131 	uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs),
5132 							BFA_DMA_ALIGN_SZ);
5133 
5134 	bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
5135 	bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;
5136 
5137 	memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
5138 }
5139 
5140 static void
5141 claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
5142 {
5143 	struct bfi_uf_buf_post_s *uf_bp_msg;
5144 	struct bfi_sge_s      *sge;
5145 	union bfi_addr_u      sga_zero = { {0} };
5146 	u16 i;
5147 	u16 buf_len;
5148 
5149 	ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi);
5150 	uf_bp_msg = ufm->uf_buf_posts;
5152 	for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
5153 	     i++, uf_bp_msg++) {
5154 		memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
5155 
5156 		uf_bp_msg->buf_tag = i;
5157 		buf_len = sizeof(struct bfa_uf_buf_s);
5158 		uf_bp_msg->buf_len = cpu_to_be16(buf_len);
5159 		bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
5160 			    bfa_lpuid(ufm->bfa));
5161 
5162 		sge = uf_bp_msg->sge;
5163 		sge[0].sg_len = buf_len;
5164 		sge[0].flags = BFI_SGE_DATA_LAST;
5165 		bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i));
5166 		bfa_sge_to_be(sge);
5167 
5168 		sge[1].sg_len = buf_len;
5169 		sge[1].flags = BFI_SGE_PGDLEN;
5170 		sge[1].sga = sga_zero;
5171 		bfa_sge_to_be(&sge[1]);
5172 	}
5173 
5174 	/*
5175 	 * advance pointer beyond consumed memory
5176 	 */
5177 	bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
5178 }
5179 
5180 static void
5181 claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
5182 {
5183 	u16 i;
5184 	struct bfa_uf_s   *uf;
5185 
5186 	/*
5187 	 * Claim block of memory for UF list
5188 	 */
5189 	ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi);
5190 
5191 	/*
5192 	 * Initialize UFs and queue it in UF free queue
5193 	 */
5194 	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
5195 		memset(uf, 0, sizeof(struct bfa_uf_s));
5196 		uf->bfa = ufm->bfa;
5197 		uf->uf_tag = i;
5198 		uf->pb_len = sizeof(struct bfa_uf_buf_s);
5199 		uf->buf_kva = (void *)&ufm->uf_pbs_kva[i];
5200 		uf->buf_pa = ufm_pbs_pa(ufm, i);
5201 		list_add_tail(&uf->qe, &ufm->uf_free_q);
5202 	}
5203 
5204 	/*
5205 	 * advance memory pointer
5206 	 */
5207 	bfa_meminfo_kva(mi) = (u8 *) uf;
5208 }
5209 
5210 static void
5211 uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
5212 {
5213 	claim_uf_pbs(ufm, mi);
5214 	claim_ufs(ufm, mi);
5215 	claim_uf_post_msgs(ufm, mi);
5216 }
5217 
5218 static void
5219 bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
5220 {
5221 	u32 num_ufs = cfg->fwcfg.num_uf_bufs;
5222 
5223 	/*
5224 	 * dma-able memory for UF posted bufs
5225 	 */
5226 	*dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs),
5227 							BFA_DMA_ALIGN_SZ);
5228 
5229 	/*
5230 	 * kernel virtual memory for UFs and UF buf post msg copies
5231 	 */
5232 	*ndm_len += sizeof(struct bfa_uf_s) * num_ufs;
5233 	*ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs;
5234 }
5235 
5236 static void
5237 bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5238 		  struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
5239 {
5240 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5241 
5242 	memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
5243 	ufm->bfa = bfa;
5244 	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
5245 	INIT_LIST_HEAD(&ufm->uf_free_q);
5246 	INIT_LIST_HEAD(&ufm->uf_posted_q);
5247 
5248 	uf_mem_claim(ufm, meminfo);
5249 }
5250 
5251 static void
5252 bfa_uf_detach(struct bfa_s *bfa)
5253 {
5254 }
5255 
5256 static struct bfa_uf_s *
5257 bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
5258 {
5259 	struct bfa_uf_s   *uf;
5260 
5261 	bfa_q_deq(&uf_mod->uf_free_q, &uf);
5262 	return uf;
5263 }
5264 
5265 static void
5266 bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
5267 {
5268 	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
5269 }
5270 
5271 static bfa_status_t
5272 bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
5273 {
5274 	struct bfi_uf_buf_post_s *uf_post_msg;
5275 
5276 	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
5277 	if (!uf_post_msg)
5278 		return BFA_STATUS_FAILED;
5279 
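	/*
	 * the buf_post message for this tag was pre-built in
	 * claim_uf_post_msgs(); just copy it into the request queue
	 */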
5280 	memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
5281 		      sizeof(struct bfi_uf_buf_post_s));
5282 	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);
5283 
5284 	bfa_trc(ufm->bfa, uf->uf_tag);
5285 
5286 	list_add_tail(&uf->qe, &ufm->uf_posted_q);
5287 	return BFA_STATUS_OK;
5288 }
5289 
5290 static void
5291 bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
5292 {
5293 	struct bfa_uf_s   *uf;
5294 
5295 	while ((uf = bfa_uf_get(uf_mod)) != NULL) {
5296 		if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
5297 			break;
5298 	}
5299 }
5300 
5301 static void
5302 uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
5303 {
5304 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5305 	u16 uf_tag = m->buf_tag;
5306 	struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag];
5307 	struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
5308 	u8 *buf = &uf_buf->d[0];
5309 	struct fchs_s *fchs;
5310 
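	/* frame and transfer lengths arrive in firmware (big endian) order */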
5311 	m->frm_len = be16_to_cpu(m->frm_len);
5312 	m->xfr_len = be16_to_cpu(m->xfr_len);
5313 
5314 	fchs = (struct fchs_s *)uf_buf;
5315 
5316 	list_del(&uf->qe);	/* dequeue from posted queue */
5317 
5318 	uf->data_ptr = buf;
5319 	uf->data_len = m->xfr_len;
5320 
5321 	bfa_assert(uf->data_len >= sizeof(struct fchs_s));
5322 
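	/*
	 * log header-only frames as-is; for larger frames also log the
	 * first payload word
	 */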
5323 	if (uf->data_len == sizeof(struct fchs_s)) {
5324 		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
5325 			       uf->data_len, (struct fchs_s *)buf);
5326 	} else {
5327 		u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
5328 		bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
5329 				      BFA_PL_EID_RX, uf->data_len,
5330 				      (struct fchs_s *)buf, pld_w0);
5331 	}
5332 
5333 	if (bfa->fcs)
5334 		__bfa_cb_uf_recv(uf, BFA_TRUE);
5335 	else
5336 		bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
5337 }
5338 
5339 static void
5340 bfa_uf_stop(struct bfa_s *bfa)
5341 {
5342 }
5343 
5344 static void
5345 bfa_uf_iocdisable(struct bfa_s *bfa)
5346 {
5347 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5348 	struct bfa_uf_s *uf;
5349 	struct list_head *qe, *qen;
5350 
5351 	list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
5352 		uf = (struct bfa_uf_s *) qe;
5353 		list_del(&uf->qe);
5354 		bfa_uf_put(ufm, uf);
5355 	}
5356 }
5357 
5358 static void
5359 bfa_uf_start(struct bfa_s *bfa)
5360 {
5361 	bfa_uf_post_all(BFA_UF_MOD(bfa));
5362 }
5363 
5364 
5365 
5366 /*
5367  *  hal_uf_api
5368  */
5369 
5370 /*
5371  * Register handler for all unsolicited receive frames.
5372  *
5373  * @param[in]	bfa		BFA instance
5374  * @param[in]	ufrecv	receive handler function
5375  * @param[in]	cbarg	receive handler arg
5376  */
5377 void
5378 bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
5379 {
5380 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5381 
5382 	ufm->ufrecv = ufrecv;
5383 	ufm->cbarg = cbarg;
5384 }
5385 
5386 /*
5387  *	Free an unsolicited frame back to BFA.
5388  *
5389  * @param[in]		uf		unsolicited frame to be freed
5390  *
5391  * @return None
5392  */
5393 void
5394 bfa_uf_free(struct bfa_uf_s *uf)
5395 {
5396 	bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
5397 	bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
5398 }
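
/*
 * Illustrative shape of a receive handler (placeholder names; a sketch
 * only): the callback borrows the frame via uf->data_ptr / uf->data_len
 * and must return it with bfa_uf_free() once consumed, which recycles the
 * buffer and reposts it to the firmware.
 *
 *	static void my_ufrecv(void *cbarg, struct bfa_uf_s *uf)
 *	{
 *		my_process_frame(uf->data_ptr, uf->data_len);
 *		bfa_uf_free(uf);
 *	}
 */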
5399 
5400 
5401 
5402 /*
5403  *  uf_pub BFA uf module public functions
5404  */
5405 void
5406 bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5407 {
5408 	bfa_trc(bfa, msg->mhdr.msg_id);
5409 
5410 	switch (msg->mhdr.msg_id) {
5411 	case BFI_UF_I2H_FRM_RCVD:
5412 		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
5413 		break;
5414 
5415 	default:
5416 		bfa_trc(bfa, msg->mhdr.msg_id);
5417 		bfa_assert(0);
5418 	}
5419 }
5420 
5421 
5422