/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#define __PREVENT_DUMP_MEM_ARR__
#define __PREVENT_PXP_GLOBAL_WIN__
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_fcoe.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include <linux/qed/qed_fcoe_if.h>

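/* Driver-side representation of a single offloaded FCoE connection. It
 * tracks the DMA addresses of the SQ, XFERQ and CONFQ page-buffer lists
 * together with the connection parameters that are later mirrored into
 * the offload ramrod.
 */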
struct qed_fcoe_conn {
	struct list_head list_entry;
	bool free_on_delete;

	u16 conn_id;
	u32 icid;
	u32 fw_cid;
	u8 layer_code;

	dma_addr_t sq_pbl_addr;
	dma_addr_t sq_curr_page_addr;
	dma_addr_t sq_next_page_addr;
	dma_addr_t xferq_pbl_addr;
	void *xferq_pbl_addr_virt_addr;
	dma_addr_t xferq_addr[4];
	void *xferq_addr_virt_addr[4];
	dma_addr_t confq_pbl_addr;
	void *confq_pbl_addr_virt_addr;
	dma_addr_t confq_addr[2];
	void *confq_addr_virt_addr[2];

	dma_addr_t terminate_params;

	u16 dst_mac_addr_lo;
	u16 dst_mac_addr_mid;
	u16 dst_mac_addr_hi;
	u16 src_mac_addr_lo;
	u16 src_mac_addr_mid;
	u16 src_mac_addr_hi;

	u16 tx_max_fc_pay_len;
	u16 e_d_tov_timer_val;
	u16 rec_tov_timer_val;
	u16 rx_max_fc_pay_len;
	u16 vlan_tag;
	u16 physical_q0;

	struct fc_addr_nw s_id;
	u8 max_conc_seqs_c3;
	struct fc_addr_nw d_id;
	u8 flags;
	u8 def_q_idx;
};

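/* Build and post the FCOE_RAMROD_CMD_ID_INIT_FUNC slow-path ramrod that
 * starts the FCoE function on this PF: MTU, task and CQ/CMDQ parameters,
 * BDQ PBL addresses and XON/XOFF thresholds are taken from the previously
 * configured fcoe_pf_params.
 */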
static int
qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
		       enum spq_mode comp_mode,
		       struct qed_spq_comp_cb *p_comp_addr)
{
	struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
	struct fcoe_init_ramrod_params *p_ramrod = NULL;
	struct fcoe_init_func_ramrod_data *p_data;
	struct fcoe_conn_context *p_cxt = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_cxt_info cxt_info;
	u32 dummy_cid;
	int rc = 0;
	u16 tmp;
	u8 i;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_INIT_FUNC,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.fcoe_init;
	p_data = &p_ramrod->init_ramrod_data;
	fcoe_pf_params = &p_hwfn->pf_params.fcoe_pf_params;

	p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
	tmp = cpu_to_le16(fcoe_pf_params->sq_num_pbl_pages);
	p_data->sq_num_pages_in_pbl = tmp;

	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
	if (rc)
		return rc;

	cxt_info.iid = dummy_cid;
	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
	if (rc) {
		DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
			  dummy_cid);
		return rc;
	}
	p_cxt = cxt_info.p_cxt;
	SET_FIELD(p_cxt->tstorm_ag_context.flags3,
		  TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);

	fcoe_pf_params->dummy_icid = (u16)dummy_cid;

	tmp = cpu_to_le16(fcoe_pf_params->num_tasks);
	p_data->func_params.num_tasks = tmp;
	p_data->func_params.log_page_size = fcoe_pf_params->log_page_size;
	p_data->func_params.debug_mode = fcoe_pf_params->debug_mode;

	DMA_REGPAIR_LE(p_data->q_params.glbl_q_params_addr,
		       fcoe_pf_params->glbl_q_params_addr);

	tmp = cpu_to_le16(fcoe_pf_params->cq_num_entries);
	p_data->q_params.cq_num_entries = tmp;

	tmp = cpu_to_le16(fcoe_pf_params->cmdq_num_entries);
	p_data->q_params.cmdq_num_entries = tmp;

	tmp = fcoe_pf_params->num_cqs;
	p_data->q_params.num_queues = (u8)tmp;

	tmp = (u16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS];
	p_data->q_params.queue_relative_offset = (u8)tmp;

	for (i = 0; i < fcoe_pf_params->num_cqs; i++) {
		u16 igu_sb_id;

		igu_sb_id = qed_get_igu_sb_id(p_hwfn, i);
		tmp = cpu_to_le16(igu_sb_id);
		p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp;
	}

	p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi;
	p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi;

	p_data->q_params.bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);

	DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ],
		       fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
	p_data->q_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
	    fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_RQ];
	tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ];
	p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ];
	p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);

	DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_IMM_DATA],
		       fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]);
	p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA] =
	    fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA];
	tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA];
	p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA];
	p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->rq_buffer_size;
	p_data->q_params.rq_buffer_size = cpu_to_le16(tmp);

	if (fcoe_pf_params->is_target) {
		SET_FIELD(p_data->q_params.q_validity,
			  SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
		if (p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA])
			SET_FIELD(p_data->q_params.q_validity,
				  SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1);
		SET_FIELD(p_data->q_params.q_validity,
			  SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1);
	} else {
		SET_FIELD(p_data->q_params.q_validity,
			  SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
	}

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	return rc;
}

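/* Build and post the FCOE_RAMROD_CMD_ID_OFFLOAD_CONN ramrod for a single
 * connection: queue PBL addresses, source/destination MAC and FC addresses,
 * timers and the VLAN tag are copied from the driver connection into the
 * ramrod data.
 */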
static int
qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
			 struct qed_fcoe_conn *p_conn,
			 enum spq_mode comp_mode,
			 struct qed_spq_comp_cb *p_comp_addr)
{
	struct fcoe_conn_offload_ramrod_params *p_ramrod = NULL;
	struct fcoe_conn_offload_ramrod_data *p_data;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 physical_q0, tmp;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_OFFLOAD_CONN,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.fcoe_conn_ofld;
	p_data = &p_ramrod->offload_ramrod_data;

	/* Transmission PQ is the first of the PF */
	physical_q0 = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_conn->physical_q0 = cpu_to_le16(physical_q0);
	p_data->physical_q0 = cpu_to_le16(physical_q0);

	p_data->conn_id = cpu_to_le16(p_conn->conn_id);
	DMA_REGPAIR_LE(p_data->sq_pbl_addr, p_conn->sq_pbl_addr);
	DMA_REGPAIR_LE(p_data->sq_curr_page_addr, p_conn->sq_curr_page_addr);
	DMA_REGPAIR_LE(p_data->sq_next_page_addr, p_conn->sq_next_page_addr);
	DMA_REGPAIR_LE(p_data->xferq_pbl_addr, p_conn->xferq_pbl_addr);
	DMA_REGPAIR_LE(p_data->xferq_curr_page_addr, p_conn->xferq_addr[0]);
	DMA_REGPAIR_LE(p_data->xferq_next_page_addr, p_conn->xferq_addr[1]);

	DMA_REGPAIR_LE(p_data->respq_pbl_addr, p_conn->confq_pbl_addr);
	DMA_REGPAIR_LE(p_data->respq_curr_page_addr, p_conn->confq_addr[0]);
	DMA_REGPAIR_LE(p_data->respq_next_page_addr, p_conn->confq_addr[1]);

	p_data->dst_mac_addr_lo = cpu_to_le16(p_conn->dst_mac_addr_lo);
	p_data->dst_mac_addr_mid = cpu_to_le16(p_conn->dst_mac_addr_mid);
	p_data->dst_mac_addr_hi = cpu_to_le16(p_conn->dst_mac_addr_hi);
	p_data->src_mac_addr_lo = cpu_to_le16(p_conn->src_mac_addr_lo);
	p_data->src_mac_addr_mid = cpu_to_le16(p_conn->src_mac_addr_mid);
	p_data->src_mac_addr_hi = cpu_to_le16(p_conn->src_mac_addr_hi);

	tmp = cpu_to_le16(p_conn->tx_max_fc_pay_len);
	p_data->tx_max_fc_pay_len = tmp;
	tmp = cpu_to_le16(p_conn->e_d_tov_timer_val);
	p_data->e_d_tov_timer_val = tmp;
	tmp = cpu_to_le16(p_conn->rec_tov_timer_val);
	p_data->rec_rr_tov_timer_val = tmp;
	tmp = cpu_to_le16(p_conn->rx_max_fc_pay_len);
	p_data->rx_max_fc_pay_len = tmp;

	p_data->vlan_tag = cpu_to_le16(p_conn->vlan_tag);
	p_data->s_id.addr_hi = p_conn->s_id.addr_hi;
	p_data->s_id.addr_mid = p_conn->s_id.addr_mid;
	p_data->s_id.addr_lo = p_conn->s_id.addr_lo;
	p_data->max_conc_seqs_c3 = p_conn->max_conc_seqs_c3;
	p_data->d_id.addr_hi = p_conn->d_id.addr_hi;
	p_data->d_id.addr_mid = p_conn->d_id.addr_mid;
	p_data->d_id.addr_lo = p_conn->d_id.addr_lo;
	p_data->flags = p_conn->flags;
	p_data->def_q_idx = p_conn->def_q_idx;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

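/* Post the FCOE_RAMROD_CMD_ID_TERMINATE_CONN ramrod; the firmware writes
 * the termination parameters to the DMA buffer supplied by the caller.
 */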
static int
qed_sp_fcoe_conn_destroy(struct qed_hwfn *p_hwfn,
			 struct qed_fcoe_conn *p_conn,
			 enum spq_mode comp_mode,
			 struct qed_spq_comp_cb *p_comp_addr)
{
	struct fcoe_conn_terminate_ramrod_params *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_TERMINATE_CONN,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.fcoe_conn_terminate;
	DMA_REGPAIR_LE(p_ramrod->terminate_ramrod_data.terminate_params_addr,
		       p_conn->terminate_params);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

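/* Stop the FCoE function: disable the FCoE task segment in the timers (TM)
 * block and post the FCOE_RAMROD_CMD_ID_DESTROY_FUNC ramrod.
 */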
static int
qed_sp_fcoe_func_stop(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      enum spq_mode comp_mode,
		      struct qed_spq_comp_cb *p_comp_addr)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u32 active_segs = 0;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_hwfn->pf_params.fcoe_pf_params.dummy_icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	active_segs = qed_rd(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK);
	active_segs &= ~BIT(QED_CXT_FCOE_TID_SEG);
	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, active_segs);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

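/* Return a connection from the free list if one is available; otherwise
 * allocate a new connection together with DMA-coherent pages for the
 * XFERQ and CONFQ PBLs and their queue pages.
 */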
static int
qed_fcoe_allocate_connection(struct qed_hwfn *p_hwfn,
			     struct qed_fcoe_conn **p_out_conn)
{
	struct qed_fcoe_conn *p_conn = NULL;
	void *p_addr;
	u32 i;

	spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
	if (!list_empty(&p_hwfn->p_fcoe_info->free_list))
		p_conn =
		    list_first_entry(&p_hwfn->p_fcoe_info->free_list,
				     struct qed_fcoe_conn, list_entry);
	if (p_conn) {
		list_del(&p_conn->list_entry);
		spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
		*p_out_conn = p_conn;
		return 0;
	}
	spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);

	p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
	if (!p_conn)
		return -ENOMEM;

	p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    QED_CHAIN_PAGE_SIZE,
				    &p_conn->xferq_pbl_addr, GFP_KERNEL);
	if (!p_addr)
		goto nomem_pbl_xferq;
	p_conn->xferq_pbl_addr_virt_addr = p_addr;

	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
		p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_conn->xferq_addr[i], GFP_KERNEL);
		if (!p_addr)
			goto nomem_xferq;
		p_conn->xferq_addr_virt_addr[i] = p_addr;

		p_addr = p_conn->xferq_pbl_addr_virt_addr;
		((dma_addr_t *)p_addr)[i] = p_conn->xferq_addr[i];
	}

	p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    QED_CHAIN_PAGE_SIZE,
				    &p_conn->confq_pbl_addr, GFP_KERNEL);
	if (!p_addr)
		goto nomem_xferq;
	p_conn->confq_pbl_addr_virt_addr = p_addr;

	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
		p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_conn->confq_addr[i], GFP_KERNEL);
		if (!p_addr)
			goto nomem_confq;
		p_conn->confq_addr_virt_addr[i] = p_addr;

		p_addr = p_conn->confq_pbl_addr_virt_addr;
		((dma_addr_t *)p_addr)[i] = p_conn->confq_addr[i];
	}

	p_conn->free_on_delete = true;
	*p_out_conn = p_conn;
	return 0;

nomem_confq:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  QED_CHAIN_PAGE_SIZE,
			  p_conn->confq_pbl_addr_virt_addr,
			  p_conn->confq_pbl_addr);
	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++)
		if (p_conn->confq_addr_virt_addr[i])
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  QED_CHAIN_PAGE_SIZE,
					  p_conn->confq_addr_virt_addr[i],
					  p_conn->confq_addr[i]);
nomem_xferq:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  QED_CHAIN_PAGE_SIZE,
			  p_conn->xferq_pbl_addr_virt_addr,
			  p_conn->xferq_pbl_addr);
	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++)
		if (p_conn->xferq_addr_virt_addr[i])
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  QED_CHAIN_PAGE_SIZE,
					  p_conn->xferq_addr_virt_addr[i],
					  p_conn->xferq_addr[i]);
nomem_pbl_xferq:
	kfree(p_conn);
	return -ENOMEM;
}

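/* Free all DMA-coherent XFERQ/CONFQ memory owned by a connection and the
 * connection structure itself.
 */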
static void qed_fcoe_free_connection(struct qed_hwfn *p_hwfn,
				     struct qed_fcoe_conn *p_conn)
{
	u32 i;

	if (!p_conn)
		return;

	if (p_conn->confq_pbl_addr_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->confq_pbl_addr_virt_addr,
				  p_conn->confq_pbl_addr);

	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
		if (!p_conn->confq_addr_virt_addr[i])
			continue;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->confq_addr_virt_addr[i],
				  p_conn->confq_addr[i]);
	}

	if (p_conn->xferq_pbl_addr_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->xferq_pbl_addr_virt_addr,
				  p_conn->xferq_pbl_addr);

	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
		if (!p_conn->xferq_addr_virt_addr[i])
			continue;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->xferq_addr_virt_addr[i],
				  p_conn->xferq_addr[i]);
	}
	kfree(p_conn);
}

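/* Map a connection CID to its doorbell address within the doorbell BAR. */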
static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
{
	return (u8 __iomem *)p_hwfn->doorbells +
	       qed_db_addr(cid, DQ_DEMS_LEGACY);
}

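/* Return the BAR0 addresses of the Mstorm (primary) and Tstorm (secondary)
 * BDQ external producers; these appear to be the locations the upper-layer
 * driver writes when posting buffer descriptors to the RQ.
 */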
static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
						   u8 bdq_id)
{
	if (RESC_NUM(p_hwfn, QED_BDQ)) {
		return (u8 __iomem *)p_hwfn->regview +
		       GTT_BAR0_MAP_REG_MSDM_RAM +
		       MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
								  QED_BDQ),
						       bdq_id);
	} else {
		DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
		return NULL;
	}
}

static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
						     u8 bdq_id)
{
	if (RESC_NUM(p_hwfn, QED_BDQ)) {
		return (u8 __iomem *)p_hwfn->regview +
		       GTT_BAR0_MAP_REG_TSDM_RAM +
		       TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
								  QED_BDQ),
						       bdq_id);
	} else {
		DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
		return NULL;
	}
}

int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_fcoe_info *p_fcoe_info;

	/* Allocate the FCoE info struct */
	p_fcoe_info = kzalloc(sizeof(*p_fcoe_info), GFP_KERNEL);
	if (!p_fcoe_info) {
		DP_NOTICE(p_hwfn, "Failed to allocate qed_fcoe_info\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&p_fcoe_info->free_list);

	p_hwfn->p_fcoe_info = p_fcoe_info;
	return 0;
}

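/* Initialize every FCoE task context in working memory: zero it, mark both
 * timer logical clients as valid and set the tstorm connection-type field.
 */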
void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
{
	struct fcoe_task_context *p_task_ctx = NULL;
	int rc;
	u32 i;

	spin_lock_init(&p_hwfn->p_fcoe_info->lock);
	for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) {
		rc = qed_cxt_get_task_ctx(p_hwfn, i,
					  QED_CTX_WORKING_MEM,
					  (void **)&p_task_ctx);
		if (rc)
			continue;

		memset(p_task_ctx, 0, sizeof(struct fcoe_task_context));
		SET_FIELD(p_task_ctx->timer_context.logical_client_0,
			  TIMERS_CONTEXT_VALIDLC0, 1);
		SET_FIELD(p_task_ctx->timer_context.logical_client_1,
			  TIMERS_CONTEXT_VALIDLC1, 1);
		SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
			  TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
	}
}

void qed_fcoe_free(struct qed_hwfn *p_hwfn)
{
	struct qed_fcoe_conn *p_conn = NULL;

	if (!p_hwfn->p_fcoe_info)
		return;

	while (!list_empty(&p_hwfn->p_fcoe_info->free_list)) {
		p_conn = list_first_entry(&p_hwfn->p_fcoe_info->free_list,
					  struct qed_fcoe_conn, list_entry);
		if (!p_conn)
			break;
		list_del(&p_conn->list_entry);
		qed_fcoe_free_connection(p_hwfn, p_conn);
	}

	kfree(p_hwfn->p_fcoe_info);
	p_hwfn->p_fcoe_info = NULL;
}

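/* Acquire a CID for a new FCoE connection and pair it with either the
 * caller-supplied connection or a freshly allocated one.
 */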
static int
qed_fcoe_acquire_connection(struct qed_hwfn *p_hwfn,
			    struct qed_fcoe_conn *p_in_conn,
			    struct qed_fcoe_conn **p_out_conn)
{
	struct qed_fcoe_conn *p_conn = NULL;
	int rc = 0;
	u32 icid;

	spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &icid);
	spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
	if (rc)
		return rc;

	/* Use input connection [if provided] or allocate a new one */
	if (p_in_conn) {
		p_conn = p_in_conn;
	} else {
		rc = qed_fcoe_allocate_connection(p_hwfn, &p_conn);
		if (rc) {
			spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
			qed_cxt_release_cid(p_hwfn, icid);
			spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
			return rc;
		}
	}

	p_conn->icid = icid;
	p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
	*p_out_conn = p_conn;

	return rc;
}

static void qed_fcoe_release_connection(struct qed_hwfn *p_hwfn,
					struct qed_fcoe_conn *p_conn)
{
	spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
	list_add_tail(&p_conn->list_entry, &p_hwfn->p_fcoe_info->free_list);
	qed_cxt_release_cid(p_hwfn, p_conn->icid);
	spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
}

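/* Read the Tstorm RX and Pstorm TX FCoE statistics for this PF directly
 * from storm RAM and convert them to host byte order.
 */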
static void _qed_fcoe_get_tstats(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_fcoe_stats *p_stats)
{
	struct fcoe_rx_stat tstats;
	u32 tstats_addr;

	memset(&tstats, 0, sizeof(tstats));
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
	    TSTORM_FCOE_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

	p_stats->fcoe_rx_byte_cnt = HILO_64_REGPAIR(tstats.fcoe_rx_byte_cnt);
	p_stats->fcoe_rx_data_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_data_pkt_cnt);
	p_stats->fcoe_rx_xfer_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_xfer_pkt_cnt);
	p_stats->fcoe_rx_other_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_other_pkt_cnt);

	p_stats->fcoe_silent_drop_pkt_cmdq_full_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_cmdq_full_cnt);
	p_stats->fcoe_silent_drop_pkt_rq_full_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_rq_full_cnt);
	p_stats->fcoe_silent_drop_pkt_crc_error_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_crc_error_cnt);
	p_stats->fcoe_silent_drop_pkt_task_invalid_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_task_invalid_cnt);
	p_stats->fcoe_silent_drop_total_pkt_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_total_pkt_cnt);
}

static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_fcoe_stats *p_stats)
{
	struct fcoe_tx_stat pstats;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
	    PSTORM_FCOE_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->fcoe_tx_byte_cnt = HILO_64_REGPAIR(pstats.fcoe_tx_byte_cnt);
	p_stats->fcoe_tx_data_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_data_pkt_cnt);
	p_stats->fcoe_tx_xfer_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_xfer_pkt_cnt);
	p_stats->fcoe_tx_other_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_other_pkt_cnt);
}

static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn,
			      struct qed_fcoe_stats *p_stats)
{
	struct qed_ptt *p_ptt;

	memset(p_stats, 0, sizeof(*p_stats));

	p_ptt = qed_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	_qed_fcoe_get_tstats(p_hwfn, p_ptt, p_stats);
	_qed_fcoe_get_pstats(p_hwfn, p_ptt, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

struct qed_hash_fcoe_con {
	struct hlist_node node;
	struct qed_fcoe_conn *con;
};

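/* Fill the FCoE-specific device information exported to the upper-layer
 * driver: common device info plus the primary and secondary BDQ RQ
 * producer addresses.
 */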
static int qed_fill_fcoe_dev_info(struct qed_dev *cdev,
				  struct qed_dev_fcoe_info *info)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	int rc;

	memset(info, 0, sizeof(*info));
	rc = qed_fill_dev_info(cdev, &info->common);

	info->primary_dbq_rq_addr =
	    qed_fcoe_get_primary_bdq_prod(hwfn, BDQ_ID_RQ);
	info->secondary_bdq_rq_addr =
	    qed_fcoe_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);

	return rc;
}

static void qed_register_fcoe_ops(struct qed_dev *cdev,
				  struct qed_fcoe_cb_ops *ops, void *cookie)
{
	cdev->protocol_ops.fcoe = ops;
	cdev->ops_cookie = cookie;
}

static struct qed_hash_fcoe_con *qed_fcoe_get_hash(struct qed_dev *cdev,
						   u32 handle)
{
	struct qed_hash_fcoe_con *hash_con = NULL;

	if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
		return NULL;

	hash_for_each_possible(cdev->connections, hash_con, node, handle) {
		if (hash_con->con->icid == handle)
			break;
	}

	if (!hash_con || (hash_con->con->icid != handle))
		return NULL;

	return hash_con;
}

static int qed_fcoe_stop(struct qed_dev *cdev)
{
	struct qed_ptt *p_ptt;
	int rc;

	if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
		DP_NOTICE(cdev, "fcoe already stopped\n");
		return 0;
	}

	if (!hash_empty(cdev->connections)) {
		DP_NOTICE(cdev,
			  "Can't stop fcoe - not all connections were returned\n");
		return -EINVAL;
	}

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt)
		return -EAGAIN;

	/* Stop the fcoe */
	rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev), p_ptt,
				   QED_SPQ_MODE_EBLOCK, NULL);
	cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);

	return rc;
}

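/* Start the FCoE function on the leading hwfn, initialize the connection
 * hash table and, when requested, copy the task (TID) memory layout into
 * the caller's qed_fcoe_tid structure.
 */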
static int qed_fcoe_start(struct qed_dev *cdev, struct qed_fcoe_tid *tasks)
{
	int rc;

	if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
		DP_NOTICE(cdev, "fcoe already started\n");
		return 0;
	}

	rc = qed_sp_fcoe_func_start(QED_LEADING_HWFN(cdev),
				    QED_SPQ_MODE_EBLOCK, NULL);
	if (rc) {
		DP_NOTICE(cdev, "Failed to start fcoe\n");
		return rc;
	}

	cdev->flags |= QED_FLAG_STORAGE_STARTED;
	hash_init(cdev->connections);

	if (tasks) {
		struct qed_tid_mem *tid_info = kzalloc(sizeof(*tid_info),
						       GFP_ATOMIC);

		if (!tid_info) {
			DP_NOTICE(cdev,
				  "Failed to allocate tasks information\n");
			qed_fcoe_stop(cdev);
			return -ENOMEM;
		}

		rc = qed_cxt_get_tid_mem_info(QED_LEADING_HWFN(cdev), tid_info);
		if (rc) {
			DP_NOTICE(cdev, "Failed to gather task information\n");
			qed_fcoe_stop(cdev);
			kfree(tid_info);
			return rc;
		}

		/* Fill task information */
		tasks->size = tid_info->tid_size;
		tasks->num_tids_per_block = tid_info->num_tids_per_block;
		memcpy(tasks->blocks, tid_info->blocks,
		       MAX_TID_BLOCKS_FCOE * sizeof(u8 *));

		kfree(tid_info);
	}

	return 0;
}

static int qed_fcoe_acquire_conn(struct qed_dev *cdev,
				 u32 *handle,
				 u32 *fw_cid, void __iomem **p_doorbell)
{
	struct qed_hash_fcoe_con *hash_con;
	int rc;

	/* Allocate a hashed connection */
	hash_con = kzalloc(sizeof(*hash_con), GFP_KERNEL);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to allocate hashed connection\n");
		return -ENOMEM;
	}

	/* Acquire the connection */
	rc = qed_fcoe_acquire_connection(QED_LEADING_HWFN(cdev), NULL,
					 &hash_con->con);
	if (rc) {
		DP_NOTICE(cdev, "Failed to acquire Connection\n");
		kfree(hash_con);
		return rc;
	}

	/* Add the connection to the hash table */
	*handle = hash_con->con->icid;
	*fw_cid = hash_con->con->fw_cid;
	hash_add(cdev->connections, &hash_con->node, *handle);

	if (p_doorbell)
		*p_doorbell = qed_fcoe_get_db_addr(QED_LEADING_HWFN(cdev),
						   *handle);

	return 0;
}

static int qed_fcoe_release_conn(struct qed_dev *cdev, u32 handle)
{
	struct qed_hash_fcoe_con *hash_con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	hlist_del(&hash_con->node);
	qed_fcoe_release_connection(QED_LEADING_HWFN(cdev), hash_con->con);
	kfree(hash_con);

	return 0;
}

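/* Copy the connection parameters supplied by the upper-layer driver into
 * the qed connection and post the offload ramrod; MAC addresses are packed
 * into the 16-bit lo/mid/hi words used by the ramrod data.
 */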
static int qed_fcoe_offload_conn(struct qed_dev *cdev,
				 u32 handle,
				 struct qed_fcoe_params_offload *conn_info)
{
	struct qed_hash_fcoe_con *hash_con;
	struct qed_fcoe_conn *con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	/* Update the connection with information from the params */
	con = hash_con->con;

	con->sq_pbl_addr = conn_info->sq_pbl_addr;
	con->sq_curr_page_addr = conn_info->sq_curr_page_addr;
	con->sq_next_page_addr = conn_info->sq_next_page_addr;
	con->tx_max_fc_pay_len = conn_info->tx_max_fc_pay_len;
	con->e_d_tov_timer_val = conn_info->e_d_tov_timer_val;
	con->rec_tov_timer_val = conn_info->rec_tov_timer_val;
	con->rx_max_fc_pay_len = conn_info->rx_max_fc_pay_len;
	con->vlan_tag = conn_info->vlan_tag;
	con->max_conc_seqs_c3 = conn_info->max_conc_seqs_c3;
	con->flags = conn_info->flags;
	con->def_q_idx = conn_info->def_q_idx;

	con->src_mac_addr_hi = (conn_info->src_mac[5] << 8) |
	    conn_info->src_mac[4];
	con->src_mac_addr_mid = (conn_info->src_mac[3] << 8) |
	    conn_info->src_mac[2];
	con->src_mac_addr_lo = (conn_info->src_mac[1] << 8) |
	    conn_info->src_mac[0];
	con->dst_mac_addr_hi = (conn_info->dst_mac[5] << 8) |
	    conn_info->dst_mac[4];
	con->dst_mac_addr_mid = (conn_info->dst_mac[3] << 8) |
	    conn_info->dst_mac[2];
	con->dst_mac_addr_lo = (conn_info->dst_mac[1] << 8) |
	    conn_info->dst_mac[0];

	con->s_id.addr_hi = conn_info->s_id.addr_hi;
	con->s_id.addr_mid = conn_info->s_id.addr_mid;
	con->s_id.addr_lo = conn_info->s_id.addr_lo;
	con->d_id.addr_hi = conn_info->d_id.addr_hi;
	con->d_id.addr_mid = conn_info->d_id.addr_mid;
	con->d_id.addr_lo = conn_info->d_id.addr_lo;

	return qed_sp_fcoe_conn_offload(QED_LEADING_HWFN(cdev), con,
					QED_SPQ_MODE_EBLOCK, NULL);
}

static int qed_fcoe_destroy_conn(struct qed_dev *cdev,
				 u32 handle, dma_addr_t terminate_params)
{
	struct qed_hash_fcoe_con *hash_con;
	struct qed_fcoe_conn *con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	/* Update the connection with information from the params */
	con = hash_con->con;
	con->terminate_params = terminate_params;

	return qed_sp_fcoe_conn_destroy(QED_LEADING_HWFN(cdev), con,
					QED_SPQ_MODE_EBLOCK, NULL);
}

static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats)
{
	return qed_fcoe_get_stats(QED_LEADING_HWFN(cdev), stats);
}

void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
				 struct qed_mcp_fcoe_stats *stats)
{
	struct qed_fcoe_stats proto_stats;

	/* Retrieve FW statistics */
	memset(&proto_stats, 0, sizeof(proto_stats));
	if (qed_fcoe_stats(cdev, &proto_stats)) {
		DP_VERBOSE(cdev, QED_MSG_STORAGE,
			   "Failed to collect FCoE statistics\n");
		return;
	}

	/* Translate FW statistics into struct */
	stats->rx_pkts = proto_stats.fcoe_rx_data_pkt_cnt +
			 proto_stats.fcoe_rx_xfer_pkt_cnt +
			 proto_stats.fcoe_rx_other_pkt_cnt;
	stats->tx_pkts = proto_stats.fcoe_tx_data_pkt_cnt +
			 proto_stats.fcoe_tx_xfer_pkt_cnt +
			 proto_stats.fcoe_tx_other_pkt_cnt;
	stats->fcs_err = proto_stats.fcoe_silent_drop_pkt_crc_error_cnt;

	/* Request protocol driver to fill-in the rest */
	if (cdev->protocol_ops.fcoe && cdev->ops_cookie) {
		struct qed_fcoe_cb_ops *ops = cdev->protocol_ops.fcoe;
		void *cookie = cdev->ops_cookie;

		if (ops->get_login_failures)
			stats->login_failure = ops->get_login_failures(cookie);
	}
}

static const struct qed_fcoe_ops qed_fcoe_ops_pass = {
	.common = &qed_common_ops_pass,
	.ll2 = &qed_ll2_ops_pass,
	.fill_dev_info = &qed_fill_fcoe_dev_info,
	.start = &qed_fcoe_start,
	.stop = &qed_fcoe_stop,
	.register_ops = &qed_register_fcoe_ops,
	.acquire_conn = &qed_fcoe_acquire_conn,
	.release_conn = &qed_fcoe_release_conn,
	.offload_conn = &qed_fcoe_offload_conn,
	.destroy_conn = &qed_fcoe_destroy_conn,
	.get_stats = &qed_fcoe_stats,
};

const struct qed_fcoe_ops *qed_get_fcoe_ops(void)
{
	return &qed_fcoe_ops_pass;
}
EXPORT_SYMBOL(qed_get_fcoe_ops);

void qed_put_fcoe_ops(void)
{
}
EXPORT_SYMBOL(qed_put_fcoe_ops);