// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#define __PREVENT_DUMP_MEM_ARR__
#define __PREVENT_PXP_GLOBAL_WIN__
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_fcoe.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include <linux/qed/qed_fcoe_if.h>

struct qed_fcoe_conn {
	struct list_head list_entry;
	bool free_on_delete;

	u16 conn_id;
	u32 icid;
	u32 fw_cid;
	u8 layer_code;

	dma_addr_t sq_pbl_addr;
	dma_addr_t sq_curr_page_addr;
	dma_addr_t sq_next_page_addr;
	dma_addr_t xferq_pbl_addr;
	void *xferq_pbl_addr_virt_addr;
	dma_addr_t xferq_addr[4];
	void *xferq_addr_virt_addr[4];
	dma_addr_t confq_pbl_addr;
	void *confq_pbl_addr_virt_addr;
	dma_addr_t confq_addr[2];
	void *confq_addr_virt_addr[2];

	dma_addr_t terminate_params;

	u16 dst_mac_addr_lo;
	u16 dst_mac_addr_mid;
	u16 dst_mac_addr_hi;
	u16 src_mac_addr_lo;
	u16 src_mac_addr_mid;
	u16 src_mac_addr_hi;

	u16 tx_max_fc_pay_len;
	u16 e_d_tov_timer_val;
	u16 rec_tov_timer_val;
	u16 rx_max_fc_pay_len;
	u16 vlan_tag;
	u16 physical_q0;

	struct fc_addr_nw s_id;
	u8 max_conc_seqs_c3;
	struct fc_addr_nw d_id;
	u8 flags;
	u8 def_q_idx;
};

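/* Post the FCoE "init function" ramrod: configures the PF-global FCoE
 * parameters in firmware (CQ/CMDQ/BDQ layout and thresholds, task pool
 * size) and prepares a dummy connection context with its timer CF enabled.
 */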
static int
qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
		       enum spq_mode comp_mode,
		       struct qed_spq_comp_cb *p_comp_addr)
{
	struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
	struct fcoe_init_ramrod_params *p_ramrod = NULL;
	struct fcoe_init_func_ramrod_data *p_data;
	struct e4_fcoe_conn_context *p_cxt = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_cxt_info cxt_info;
	u32 dummy_cid;
	int rc = 0;
	u16 tmp;
	u8 i;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_INIT_FUNC,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.fcoe_init;
	p_data = &p_ramrod->init_ramrod_data;
	fcoe_pf_params = &p_hwfn->pf_params.fcoe_pf_params;

	/* Sanity */
	if (fcoe_pf_params->num_cqs > p_hwfn->hw_info.feat_num[QED_FCOE_CQ]) {
		DP_ERR(p_hwfn,
		       "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n",
		       fcoe_pf_params->num_cqs,
		       p_hwfn->hw_info.feat_num[QED_FCOE_CQ]);
		rc = -EINVAL;
		goto err;
	}

	p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
	tmp = cpu_to_le16(fcoe_pf_params->sq_num_pbl_pages);
	p_data->sq_num_pages_in_pbl = tmp;

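	/* Acquire and zero a dummy connection context for the firmware
	 * timers; its icid is saved and later used as the CID of the
	 * destroy-function ramrod.
	 */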
	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
	if (rc)
		goto err;

	cxt_info.iid = dummy_cid;
	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
	if (rc) {
		DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
			  dummy_cid);
		goto err;
	}
	p_cxt = cxt_info.p_cxt;
	memset(p_cxt, 0, sizeof(*p_cxt));

	SET_FIELD(p_cxt->tstorm_ag_context.flags3,
		  E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);

	fcoe_pf_params->dummy_icid = (u16)dummy_cid;

	tmp = cpu_to_le16(fcoe_pf_params->num_tasks);
	p_data->func_params.num_tasks = tmp;
	p_data->func_params.log_page_size = fcoe_pf_params->log_page_size;
	p_data->func_params.debug_mode = fcoe_pf_params->debug_mode;

	DMA_REGPAIR_LE(p_data->q_params.glbl_q_params_addr,
		       fcoe_pf_params->glbl_q_params_addr);

	tmp = cpu_to_le16(fcoe_pf_params->cq_num_entries);
	p_data->q_params.cq_num_entries = tmp;

	tmp = cpu_to_le16(fcoe_pf_params->cmdq_num_entries);
	p_data->q_params.cmdq_num_entries = tmp;

	tmp = fcoe_pf_params->num_cqs;
	p_data->q_params.num_queues = (u8)tmp;

	tmp = (u16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS];
	p_data->q_params.queue_relative_offset = (u8)tmp;

	for (i = 0; i < fcoe_pf_params->num_cqs; i++) {
		u16 igu_sb_id;

		igu_sb_id = qed_get_igu_sb_id(p_hwfn, i);
		tmp = cpu_to_le16(igu_sb_id);
		p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp;
	}

	p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi;
	p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi;

	p_data->q_params.bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);

	DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ],
		       fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
	p_data->q_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
	    fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_RQ];
	tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ];
	p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ];
	p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);

	DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_IMM_DATA],
		       fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]);
	p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA] =
	    fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA];
	tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA];
	p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA];
	p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->rq_buffer_size;
	p_data->q_params.rq_buffer_size = cpu_to_le16(tmp);

	if (fcoe_pf_params->is_target) {
		SET_FIELD(p_data->q_params.q_validity,
			  SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
		if (p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA])
			SET_FIELD(p_data->q_params.q_validity,
				  SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1);
		SET_FIELD(p_data->q_params.q_validity,
			  SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1);
	} else {
		SET_FIELD(p_data->q_params.q_validity,
			  SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
	}

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	return rc;

err:
	qed_sp_destroy_request(p_hwfn, p_ent);
	return rc;
}

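/* Post a connection "offload" ramrod that hands a single FCoE connection
 * to the firmware: SQ, XFERQ and CONFQ (RESPQ) page addresses, MAC and FC
 * addresses, timer values and payload limits, all taken from @p_conn.
 */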
static int
qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
			 struct qed_fcoe_conn *p_conn,
			 enum spq_mode comp_mode,
			 struct qed_spq_comp_cb *p_comp_addr)
{
	struct fcoe_conn_offload_ramrod_params *p_ramrod = NULL;
	struct fcoe_conn_offload_ramrod_data *p_data;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 physical_q0, tmp;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_OFFLOAD_CONN,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.fcoe_conn_ofld;
	p_data = &p_ramrod->offload_ramrod_data;

	/* Transmission PQ is the first of the PF */
	physical_q0 = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_conn->physical_q0 = physical_q0;
	p_data->physical_q0 = cpu_to_le16(physical_q0);

	p_data->conn_id = cpu_to_le16(p_conn->conn_id);
	DMA_REGPAIR_LE(p_data->sq_pbl_addr, p_conn->sq_pbl_addr);
	DMA_REGPAIR_LE(p_data->sq_curr_page_addr, p_conn->sq_curr_page_addr);
	DMA_REGPAIR_LE(p_data->sq_next_page_addr, p_conn->sq_next_page_addr);
	DMA_REGPAIR_LE(p_data->xferq_pbl_addr, p_conn->xferq_pbl_addr);
	DMA_REGPAIR_LE(p_data->xferq_curr_page_addr, p_conn->xferq_addr[0]);
	DMA_REGPAIR_LE(p_data->xferq_next_page_addr, p_conn->xferq_addr[1]);

	DMA_REGPAIR_LE(p_data->respq_pbl_addr, p_conn->confq_pbl_addr);
	DMA_REGPAIR_LE(p_data->respq_curr_page_addr, p_conn->confq_addr[0]);
	DMA_REGPAIR_LE(p_data->respq_next_page_addr, p_conn->confq_addr[1]);

	p_data->dst_mac_addr_lo = cpu_to_le16(p_conn->dst_mac_addr_lo);
	p_data->dst_mac_addr_mid = cpu_to_le16(p_conn->dst_mac_addr_mid);
	p_data->dst_mac_addr_hi = cpu_to_le16(p_conn->dst_mac_addr_hi);
	p_data->src_mac_addr_lo = cpu_to_le16(p_conn->src_mac_addr_lo);
	p_data->src_mac_addr_mid = cpu_to_le16(p_conn->src_mac_addr_mid);
	p_data->src_mac_addr_hi = cpu_to_le16(p_conn->src_mac_addr_hi);

	tmp = cpu_to_le16(p_conn->tx_max_fc_pay_len);
	p_data->tx_max_fc_pay_len = tmp;
	tmp = cpu_to_le16(p_conn->e_d_tov_timer_val);
	p_data->e_d_tov_timer_val = tmp;
	tmp = cpu_to_le16(p_conn->rec_tov_timer_val);
	p_data->rec_rr_tov_timer_val = tmp;
	tmp = cpu_to_le16(p_conn->rx_max_fc_pay_len);
	p_data->rx_max_fc_pay_len = tmp;

	p_data->vlan_tag = cpu_to_le16(p_conn->vlan_tag);
	p_data->s_id.addr_hi = p_conn->s_id.addr_hi;
	p_data->s_id.addr_mid = p_conn->s_id.addr_mid;
	p_data->s_id.addr_lo = p_conn->s_id.addr_lo;
	p_data->max_conc_seqs_c3 = p_conn->max_conc_seqs_c3;
	p_data->d_id.addr_hi = p_conn->d_id.addr_hi;
	p_data->d_id.addr_mid = p_conn->d_id.addr_mid;
	p_data->d_id.addr_lo = p_conn->d_id.addr_lo;
	p_data->flags = p_conn->flags;
	if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
		SET_FIELD(p_data->flags,
			  FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN, 1);
	p_data->def_q_idx = p_conn->def_q_idx;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

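/* Post a connection "terminate" ramrod for @p_conn, pointing the firmware
 * at the caller-supplied terminate-parameters buffer.
 */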
static int
qed_sp_fcoe_conn_destroy(struct qed_hwfn *p_hwfn,
			 struct qed_fcoe_conn *p_conn,
			 enum spq_mode comp_mode,
			 struct qed_spq_comp_cb *p_comp_addr)
{
	struct fcoe_conn_terminate_ramrod_params *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_TERMINATE_CONN,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.fcoe_conn_terminate;
	DMA_REGPAIR_LE(p_ramrod->terminate_ramrod_data.terminate_params_addr,
		       p_conn->terminate_params);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

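/* Post the FCoE "destroy function" ramrod and disable the FCoE task
 * segment in the timers block.
 */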
static int
qed_sp_fcoe_func_stop(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      enum spq_mode comp_mode,
		      struct qed_spq_comp_cb *p_comp_addr)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u32 active_segs = 0;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_hwfn->pf_params.fcoe_pf_params.dummy_icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	active_segs = qed_rd(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK);
	active_segs &= ~BIT(QED_CXT_FCOE_TID_SEG);
	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, active_segs);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

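/* Reuse a connection from the free list when one is available; otherwise
 * allocate a new qed_fcoe_conn along with its XFERQ/CONFQ pages and the
 * PBLs that point at them.
 */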
static int
qed_fcoe_allocate_connection(struct qed_hwfn *p_hwfn,
			     struct qed_fcoe_conn **p_out_conn)
{
	struct qed_fcoe_conn *p_conn = NULL;
	void *p_addr;
	u32 i;

	spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
	if (!list_empty(&p_hwfn->p_fcoe_info->free_list))
		p_conn =
		    list_first_entry(&p_hwfn->p_fcoe_info->free_list,
				     struct qed_fcoe_conn, list_entry);
	if (p_conn) {
		list_del(&p_conn->list_entry);
		spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
		*p_out_conn = p_conn;
		return 0;
	}
	spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);

	p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
	if (!p_conn)
		return -ENOMEM;

	p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    QED_CHAIN_PAGE_SIZE,
				    &p_conn->xferq_pbl_addr, GFP_KERNEL);
	if (!p_addr)
		goto nomem_pbl_xferq;
	p_conn->xferq_pbl_addr_virt_addr = p_addr;

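	/* Allocate the XFERQ pages and record each page's DMA address in
	 * the PBL allocated above.
	 */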
	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
		p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_conn->xferq_addr[i], GFP_KERNEL);
		if (!p_addr)
			goto nomem_xferq;
		p_conn->xferq_addr_virt_addr[i] = p_addr;

		p_addr = p_conn->xferq_pbl_addr_virt_addr;
		((dma_addr_t *)p_addr)[i] = p_conn->xferq_addr[i];
	}

	p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    QED_CHAIN_PAGE_SIZE,
				    &p_conn->confq_pbl_addr, GFP_KERNEL);
	if (!p_addr)
		goto nomem_xferq;
	p_conn->confq_pbl_addr_virt_addr = p_addr;

	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
		p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_conn->confq_addr[i], GFP_KERNEL);
		if (!p_addr)
			goto nomem_confq;
		p_conn->confq_addr_virt_addr[i] = p_addr;

		p_addr = p_conn->confq_pbl_addr_virt_addr;
		((dma_addr_t *)p_addr)[i] = p_conn->confq_addr[i];
	}

	p_conn->free_on_delete = true;
	*p_out_conn = p_conn;
	return 0;

nomem_confq:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  QED_CHAIN_PAGE_SIZE,
			  p_conn->confq_pbl_addr_virt_addr,
			  p_conn->confq_pbl_addr);
	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++)
		if (p_conn->confq_addr_virt_addr[i])
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  QED_CHAIN_PAGE_SIZE,
					  p_conn->confq_addr_virt_addr[i],
					  p_conn->confq_addr[i]);
nomem_xferq:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  QED_CHAIN_PAGE_SIZE,
			  p_conn->xferq_pbl_addr_virt_addr,
			  p_conn->xferq_pbl_addr);
	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++)
		if (p_conn->xferq_addr_virt_addr[i])
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  QED_CHAIN_PAGE_SIZE,
					  p_conn->xferq_addr_virt_addr[i],
					  p_conn->xferq_addr[i]);
nomem_pbl_xferq:
	kfree(p_conn);
	return -ENOMEM;
}

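/* Free all DMA memory owned by a connection (CONFQ/XFERQ pages and their
 * PBLs) and the connection structure itself.
 */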
static void qed_fcoe_free_connection(struct qed_hwfn *p_hwfn,
				     struct qed_fcoe_conn *p_conn)
{
	u32 i;

	if (!p_conn)
		return;

	if (p_conn->confq_pbl_addr_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->confq_pbl_addr_virt_addr,
				  p_conn->confq_pbl_addr);

	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
		if (!p_conn->confq_addr_virt_addr[i])
			continue;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->confq_addr_virt_addr[i],
				  p_conn->confq_addr[i]);
	}

	if (p_conn->xferq_pbl_addr_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->xferq_pbl_addr_virt_addr,
				  p_conn->xferq_pbl_addr);

	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
		if (!p_conn->xferq_addr_virt_addr[i])
			continue;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->xferq_addr_virt_addr[i],
				  p_conn->xferq_addr[i]);
	}
	kfree(p_conn);
}

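/* Doorbell address the upper driver uses to ring the connection's SQ. */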
static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
{
	return (u8 __iomem *)p_hwfn->doorbells +
	       qed_db_addr(cid, DQ_DEMS_LEGACY);
}

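/* External producer addresses of the BDQs: the primary producer lives in
 * MSTORM RAM, the secondary in TSTORM RAM. Both are handed to the upper
 * driver through qed_dev_fcoe_info.
 */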
static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
						   u8 bdq_id)
{
	if (RESC_NUM(p_hwfn, QED_BDQ)) {
		return (u8 __iomem *)p_hwfn->regview +
		       GTT_BAR0_MAP_REG_MSDM_RAM +
		       MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
								  QED_BDQ),
						       bdq_id);
	} else {
		DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
		return NULL;
	}
}

static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
						     u8 bdq_id)
{
	if (RESC_NUM(p_hwfn, QED_BDQ)) {
		return (u8 __iomem *)p_hwfn->regview +
		       GTT_BAR0_MAP_REG_TSDM_RAM +
		       TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
								  QED_BDQ),
						       bdq_id);
	} else {
		DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
		return NULL;
	}
}

int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_fcoe_info *p_fcoe_info;

	/* Allocate the per-hwfn FCoE info struct */
	p_fcoe_info = kzalloc(sizeof(*p_fcoe_info), GFP_KERNEL);
	if (!p_fcoe_info) {
		DP_NOTICE(p_hwfn, "Failed to allocate qed_fcoe_info\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&p_fcoe_info->free_list);

	p_hwfn->p_fcoe_info = p_fcoe_info;
	return 0;
}

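/* Zero the working copy of every FCoE task context, validate both timer
 * logical clients and set the tstorm connection-type field.
 */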
void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
{
	struct e4_fcoe_task_context *p_task_ctx = NULL;
	int rc;
	u32 i;

	spin_lock_init(&p_hwfn->p_fcoe_info->lock);
	for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) {
		rc = qed_cxt_get_task_ctx(p_hwfn, i,
					  QED_CTX_WORKING_MEM,
					  (void **)&p_task_ctx);
		if (rc)
			continue;

		memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context));
		SET_FIELD(p_task_ctx->timer_context.logical_client_0,
			  TIMERS_CONTEXT_VALIDLC0, 1);
		SET_FIELD(p_task_ctx->timer_context.logical_client_1,
			  TIMERS_CONTEXT_VALIDLC1, 1);
		SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
			  E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
	}
}

void qed_fcoe_free(struct qed_hwfn *p_hwfn)
{
	struct qed_fcoe_conn *p_conn = NULL;

	if (!p_hwfn->p_fcoe_info)
		return;

	while (!list_empty(&p_hwfn->p_fcoe_info->free_list)) {
		p_conn = list_first_entry(&p_hwfn->p_fcoe_info->free_list,
					  struct qed_fcoe_conn, list_entry);
		if (!p_conn)
			break;
		list_del(&p_conn->list_entry);
		qed_fcoe_free_connection(p_hwfn, p_conn);
	}

	kfree(p_hwfn->p_fcoe_info);
	p_hwfn->p_fcoe_info = NULL;
}

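/* Acquire a CID for a new FCoE connection and bind it to either the
 * caller-supplied connection or a freshly allocated one.
 */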
static int
qed_fcoe_acquire_connection(struct qed_hwfn *p_hwfn,
			    struct qed_fcoe_conn *p_in_conn,
			    struct qed_fcoe_conn **p_out_conn)
{
	struct qed_fcoe_conn *p_conn = NULL;
	int rc = 0;
	u32 icid;

	spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &icid);
	spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
	if (rc)
		return rc;

	/* Use input connection [if provided] or allocate a new one */
	if (p_in_conn) {
		p_conn = p_in_conn;
	} else {
		rc = qed_fcoe_allocate_connection(p_hwfn, &p_conn);
		if (rc) {
			spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
			qed_cxt_release_cid(p_hwfn, icid);
			spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
			return rc;
		}
	}

	p_conn->icid = icid;
	p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
	*p_out_conn = p_conn;

	return rc;
}

static void qed_fcoe_release_connection(struct qed_hwfn *p_hwfn,
					struct qed_fcoe_conn *p_conn)
{
	spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
	list_add_tail(&p_conn->list_entry, &p_hwfn->p_fcoe_info->free_list);
	qed_cxt_release_cid(p_hwfn, p_conn->icid);
	spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
}

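/* Read the TSTORM (Rx) FCoE statistics from storm RAM. */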
static void _qed_fcoe_get_tstats(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_fcoe_stats *p_stats)
{
	struct fcoe_rx_stat tstats;
	u32 tstats_addr;

	memset(&tstats, 0, sizeof(tstats));
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
	    TSTORM_FCOE_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

	p_stats->fcoe_rx_byte_cnt = HILO_64_REGPAIR(tstats.fcoe_rx_byte_cnt);
	p_stats->fcoe_rx_data_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_data_pkt_cnt);
	p_stats->fcoe_rx_xfer_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_xfer_pkt_cnt);
	p_stats->fcoe_rx_other_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_other_pkt_cnt);

	p_stats->fcoe_silent_drop_pkt_cmdq_full_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_cmdq_full_cnt);
	p_stats->fcoe_silent_drop_pkt_rq_full_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_rq_full_cnt);
	p_stats->fcoe_silent_drop_pkt_crc_error_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_crc_error_cnt);
	p_stats->fcoe_silent_drop_pkt_task_invalid_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_task_invalid_cnt);
	p_stats->fcoe_silent_drop_total_pkt_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_total_pkt_cnt);
}

static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_fcoe_stats *p_stats)
{
	struct fcoe_tx_stat pstats;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
	    PSTORM_FCOE_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->fcoe_tx_byte_cnt = HILO_64_REGPAIR(pstats.fcoe_tx_byte_cnt);
	p_stats->fcoe_tx_data_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_data_pkt_cnt);
	p_stats->fcoe_tx_xfer_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_xfer_pkt_cnt);
	p_stats->fcoe_tx_other_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_other_pkt_cnt);
}

static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn,
			      struct qed_fcoe_stats *p_stats)
{
	struct qed_ptt *p_ptt;

	memset(p_stats, 0, sizeof(*p_stats));

	p_ptt = qed_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	_qed_fcoe_get_tstats(p_hwfn, p_ptt, p_stats);
	_qed_fcoe_get_pstats(p_hwfn, p_ptt, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

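/* Hash-table entry mapping a connection handle (icid) to its connection. */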
struct qed_hash_fcoe_con {
	struct hlist_node node;
	struct qed_fcoe_conn *con;
};

static int qed_fill_fcoe_dev_info(struct qed_dev *cdev,
				  struct qed_dev_fcoe_info *info)
{
	struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev);
	int rc;

	memset(info, 0, sizeof(*info));
	rc = qed_fill_dev_info(cdev, &info->common);

	info->primary_dbq_rq_addr =
	    qed_fcoe_get_primary_bdq_prod(hwfn, BDQ_ID_RQ);
	info->secondary_bdq_rq_addr =
	    qed_fcoe_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);

	info->wwpn = hwfn->mcp_info->func_info.wwn_port;
	info->wwnn = hwfn->mcp_info->func_info.wwn_node;

	info->num_cqs = FEAT_NUM(hwfn, QED_FCOE_CQ);

	return rc;
}

static void qed_register_fcoe_ops(struct qed_dev *cdev,
				  struct qed_fcoe_cb_ops *ops, void *cookie)
{
	cdev->protocol_ops.fcoe = ops;
	cdev->ops_cookie = cookie;
}

static struct qed_hash_fcoe_con *qed_fcoe_get_hash(struct qed_dev *cdev,
						   u32 handle)
{
	struct qed_hash_fcoe_con *hash_con = NULL;

	if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
		return NULL;

	hash_for_each_possible(cdev->connections, hash_con, node, handle) {
		if (hash_con->con->icid == handle)
			break;
	}

	if (!hash_con || (hash_con->con->icid != handle))
		return NULL;

	return hash_con;
}

static int qed_fcoe_stop(struct qed_dev *cdev)
{
	struct qed_ptt *p_ptt;
	int rc;

	if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
		DP_NOTICE(cdev, "fcoe already stopped\n");
		return 0;
	}

	if (!hash_empty(cdev->connections)) {
		DP_NOTICE(cdev,
			  "Can't stop fcoe - not all connections were returned\n");
		return -EINVAL;
	}

	p_ptt = qed_ptt_acquire(QED_AFFIN_HWFN(cdev));
	if (!p_ptt)
		return -EAGAIN;

	/* Stop the fcoe */
	rc = qed_sp_fcoe_func_stop(QED_AFFIN_HWFN(cdev), p_ptt,
				   QED_SPQ_MODE_EBLOCK, NULL);
	cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
	qed_ptt_release(QED_AFFIN_HWFN(cdev), p_ptt);

	return rc;
}

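/* Start the FCoE PF: post the init-function ramrod, initialize the
 * connection hash table and, when requested, return the task (TID)
 * memory layout to the upper driver.
 */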
static int qed_fcoe_start(struct qed_dev *cdev, struct qed_fcoe_tid *tasks)
{
	int rc;

	if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
		DP_NOTICE(cdev, "fcoe already started\n");
		return 0;
	}

	rc = qed_sp_fcoe_func_start(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK,
				    NULL);
	if (rc) {
		DP_NOTICE(cdev, "Failed to start fcoe\n");
		return rc;
	}

	cdev->flags |= QED_FLAG_STORAGE_STARTED;
	hash_init(cdev->connections);

	if (tasks) {
		struct qed_tid_mem *tid_info = kzalloc(sizeof(*tid_info),
						       GFP_ATOMIC);

		if (!tid_info) {
			DP_NOTICE(cdev,
				  "Failed to allocate tasks information\n");
			qed_fcoe_stop(cdev);
			return -ENOMEM;
		}

		rc = qed_cxt_get_tid_mem_info(QED_AFFIN_HWFN(cdev), tid_info);
		if (rc) {
			DP_NOTICE(cdev, "Failed to gather task information\n");
			qed_fcoe_stop(cdev);
			kfree(tid_info);
			return rc;
		}

		/* Fill task information */
		tasks->size = tid_info->tid_size;
		tasks->num_tids_per_block = tid_info->num_tids_per_block;
		memcpy(tasks->blocks, tid_info->blocks,
		       MAX_TID_BLOCKS_FCOE * sizeof(u8 *));

		kfree(tid_info);
	}

	return 0;
}

static int qed_fcoe_acquire_conn(struct qed_dev *cdev,
				 u32 *handle,
				 u32 *fw_cid, void __iomem **p_doorbell)
{
	struct qed_hash_fcoe_con *hash_con;
	int rc;

	/* Allocate a hashed connection */
	hash_con = kzalloc(sizeof(*hash_con), GFP_KERNEL);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to allocate hashed connection\n");
		return -ENOMEM;
	}

	/* Acquire the connection */
	rc = qed_fcoe_acquire_connection(QED_AFFIN_HWFN(cdev), NULL,
					 &hash_con->con);
	if (rc) {
		DP_NOTICE(cdev, "Failed to acquire Connection\n");
		kfree(hash_con);
		return rc;
	}

	/* Add the connection to the hash table */
	*handle = hash_con->con->icid;
	*fw_cid = hash_con->con->fw_cid;
	hash_add(cdev->connections, &hash_con->node, *handle);

	if (p_doorbell)
		*p_doorbell = qed_fcoe_get_db_addr(QED_AFFIN_HWFN(cdev),
						   *handle);

	return 0;
}

static int qed_fcoe_release_conn(struct qed_dev *cdev, u32 handle)
{
	struct qed_hash_fcoe_con *hash_con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	hlist_del(&hash_con->node);
	qed_fcoe_release_connection(QED_AFFIN_HWFN(cdev), hash_con->con);
	kfree(hash_con);

	return 0;
}

static int qed_fcoe_offload_conn(struct qed_dev *cdev,
				 u32 handle,
				 struct qed_fcoe_params_offload *conn_info)
{
	struct qed_hash_fcoe_con *hash_con;
	struct qed_fcoe_conn *con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	/* Update the connection with information from the params */
	con = hash_con->con;

	con->sq_pbl_addr = conn_info->sq_pbl_addr;
	con->sq_curr_page_addr = conn_info->sq_curr_page_addr;
	con->sq_next_page_addr = conn_info->sq_next_page_addr;
	con->tx_max_fc_pay_len = conn_info->tx_max_fc_pay_len;
	con->e_d_tov_timer_val = conn_info->e_d_tov_timer_val;
	con->rec_tov_timer_val = conn_info->rec_tov_timer_val;
	con->rx_max_fc_pay_len = conn_info->rx_max_fc_pay_len;
	con->vlan_tag = conn_info->vlan_tag;
	con->max_conc_seqs_c3 = conn_info->max_conc_seqs_c3;
	con->flags = conn_info->flags;
	con->def_q_idx = conn_info->def_q_idx;

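	/* Pack the MAC addresses into the three 16-bit words the firmware
	 * expects: bytes 0-1 -> lo, 2-3 -> mid, 4-5 -> hi.
	 */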
	con->src_mac_addr_hi = (conn_info->src_mac[5] << 8) |
	    conn_info->src_mac[4];
	con->src_mac_addr_mid = (conn_info->src_mac[3] << 8) |
	    conn_info->src_mac[2];
	con->src_mac_addr_lo = (conn_info->src_mac[1] << 8) |
	    conn_info->src_mac[0];
	con->dst_mac_addr_hi = (conn_info->dst_mac[5] << 8) |
	    conn_info->dst_mac[4];
	con->dst_mac_addr_mid = (conn_info->dst_mac[3] << 8) |
	    conn_info->dst_mac[2];
	con->dst_mac_addr_lo = (conn_info->dst_mac[1] << 8) |
	    conn_info->dst_mac[0];

	con->s_id.addr_hi = conn_info->s_id.addr_hi;
	con->s_id.addr_mid = conn_info->s_id.addr_mid;
	con->s_id.addr_lo = conn_info->s_id.addr_lo;
	con->d_id.addr_hi = conn_info->d_id.addr_hi;
	con->d_id.addr_mid = conn_info->d_id.addr_mid;
	con->d_id.addr_lo = conn_info->d_id.addr_lo;

	return qed_sp_fcoe_conn_offload(QED_AFFIN_HWFN(cdev), con,
					QED_SPQ_MODE_EBLOCK, NULL);
}

static int qed_fcoe_destroy_conn(struct qed_dev *cdev,
				 u32 handle, dma_addr_t terminate_params)
{
	struct qed_hash_fcoe_con *hash_con;
	struct qed_fcoe_conn *con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	/* Update the connection with information from the params */
	con = hash_con->con;
	con->terminate_params = terminate_params;

	return qed_sp_fcoe_conn_destroy(QED_AFFIN_HWFN(cdev), con,
					QED_SPQ_MODE_EBLOCK, NULL);
}

static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats)
{
	return qed_fcoe_get_stats(QED_AFFIN_HWFN(cdev), stats);
}

void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
				 struct qed_mcp_fcoe_stats *stats)
{
	struct qed_fcoe_stats proto_stats;

	/* Retrieve FW statistics */
	memset(&proto_stats, 0, sizeof(proto_stats));
	if (qed_fcoe_stats(cdev, &proto_stats)) {
		DP_VERBOSE(cdev, QED_MSG_STORAGE,
			   "Failed to collect FCoE statistics\n");
		return;
	}

	/* Translate FW statistics into struct */
	stats->rx_pkts = proto_stats.fcoe_rx_data_pkt_cnt +
			 proto_stats.fcoe_rx_xfer_pkt_cnt +
			 proto_stats.fcoe_rx_other_pkt_cnt;
	stats->tx_pkts = proto_stats.fcoe_tx_data_pkt_cnt +
			 proto_stats.fcoe_tx_xfer_pkt_cnt +
			 proto_stats.fcoe_tx_other_pkt_cnt;
	stats->fcs_err = proto_stats.fcoe_silent_drop_pkt_crc_error_cnt;

	/* Request protocol driver to fill-in the rest */
	if (cdev->protocol_ops.fcoe && cdev->ops_cookie) {
		struct qed_fcoe_cb_ops *ops = cdev->protocol_ops.fcoe;
		void *cookie = cdev->ops_cookie;

		if (ops->get_login_failures)
			stats->login_failure = ops->get_login_failures(cookie);
	}
}

static const struct qed_fcoe_ops qed_fcoe_ops_pass = {
	.common = &qed_common_ops_pass,
	.ll2 = &qed_ll2_ops_pass,
	.fill_dev_info = &qed_fill_fcoe_dev_info,
	.start = &qed_fcoe_start,
	.stop = &qed_fcoe_stop,
	.register_ops = &qed_register_fcoe_ops,
	.acquire_conn = &qed_fcoe_acquire_conn,
	.release_conn = &qed_fcoe_release_conn,
	.offload_conn = &qed_fcoe_offload_conn,
	.destroy_conn = &qed_fcoe_destroy_conn,
	.get_stats = &qed_fcoe_stats,
};

const struct qed_fcoe_ops *qed_get_fcoe_ops(void)
{
	return &qed_fcoe_ops_pass;
}
EXPORT_SYMBOL(qed_get_fcoe_ops);

void qed_put_fcoe_ops(void)
{
}
EXPORT_SYMBOL(qed_put_fcoe_ops);