/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#define __PREVENT_DUMP_MEM_ARR__
#define __PREVENT_PXP_GLOBAL_WIN__
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_fcoe.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include <linux/qed/qed_fcoe_if.h>

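/* Driver-side representation of a single offloaded FCoE connection;
 * the queue DMA and virtual addresses are kept here so the XFERQ/CONFQ
 * pages can be freed when the connection is released.
 */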
struct qed_fcoe_conn {
	struct list_head list_entry;
	bool free_on_delete;

	u16 conn_id;
	u32 icid;
	u32 fw_cid;
	u8 layer_code;

	dma_addr_t sq_pbl_addr;
	dma_addr_t sq_curr_page_addr;
	dma_addr_t sq_next_page_addr;
	dma_addr_t xferq_pbl_addr;
	void *xferq_pbl_addr_virt_addr;
	dma_addr_t xferq_addr[4];
	void *xferq_addr_virt_addr[4];
	dma_addr_t confq_pbl_addr;
	void *confq_pbl_addr_virt_addr;
	dma_addr_t confq_addr[2];
	void *confq_addr_virt_addr[2];

	dma_addr_t terminate_params;

	u16 dst_mac_addr_lo;
	u16 dst_mac_addr_mid;
	u16 dst_mac_addr_hi;
	u16 src_mac_addr_lo;
	u16 src_mac_addr_mid;
	u16 src_mac_addr_hi;

	u16 tx_max_fc_pay_len;
	u16 e_d_tov_timer_val;
	u16 rec_tov_timer_val;
	u16 rx_max_fc_pay_len;
	u16 vlan_tag;
	u16 physical_q0;

	struct fc_addr_nw s_id;
	u8 max_conc_seqs_c3;
	struct fc_addr_nw d_id;
	u8 flags;
	u8 def_q_idx;
};

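/* Post the FCoE "init function" ramrod: fill the per-PF function and queue
 * parameters (CQs, CMDQs, BDQs) from fcoe_pf_params and reserve a dummy CID,
 * which the destroy flow later uses as its SPQ CID.
 */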
static int
qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
		       enum spq_mode comp_mode,
		       struct qed_spq_comp_cb *p_comp_addr)
{
	struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
	struct fcoe_init_ramrod_params *p_ramrod = NULL;
	struct fcoe_init_func_ramrod_data *p_data;
	struct fcoe_conn_context *p_cxt = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_cxt_info cxt_info;
	u32 dummy_cid;
	int rc = 0;
	u16 tmp;
	u8 i;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_INIT_FUNC,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.fcoe_init;
	p_data = &p_ramrod->init_ramrod_data;
	fcoe_pf_params = &p_hwfn->pf_params.fcoe_pf_params;

	p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
	tmp = cpu_to_le16(fcoe_pf_params->sq_num_pbl_pages);
	p_data->sq_num_pages_in_pbl = tmp;

	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
	if (rc)
		return rc;

	cxt_info.iid = dummy_cid;
	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
	if (rc) {
		DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
			  dummy_cid);
		return rc;
	}
	p_cxt = cxt_info.p_cxt;
	SET_FIELD(p_cxt->tstorm_ag_context.flags3,
		  TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);

	fcoe_pf_params->dummy_icid = (u16)dummy_cid;

	tmp = cpu_to_le16(fcoe_pf_params->num_tasks);
	p_data->func_params.num_tasks = tmp;
	p_data->func_params.log_page_size = fcoe_pf_params->log_page_size;
	p_data->func_params.debug_mode = fcoe_pf_params->debug_mode;

	DMA_REGPAIR_LE(p_data->q_params.glbl_q_params_addr,
		       fcoe_pf_params->glbl_q_params_addr);

	tmp = cpu_to_le16(fcoe_pf_params->cq_num_entries);
	p_data->q_params.cq_num_entries = tmp;

	tmp = cpu_to_le16(fcoe_pf_params->cmdq_num_entries);
	p_data->q_params.cmdq_num_entries = tmp;

	tmp = fcoe_pf_params->num_cqs;
	p_data->q_params.num_queues = (u8)tmp;

	tmp = (u16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS];
	p_data->q_params.queue_relative_offset = (u8)tmp;

	for (i = 0; i < fcoe_pf_params->num_cqs; i++) {
		tmp = cpu_to_le16(p_hwfn->sbs_info[i]->igu_sb_id);
		p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp;
	}

	p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi;
	p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi;

	p_data->q_params.bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);

	DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ],
		       fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
	p_data->q_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
	    fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_RQ];
	tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ];
	p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ];
	p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);

	DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_IMM_DATA],
		       fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]);
	p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA] =
	    fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA];
	tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA];
	p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA];
	p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->rq_buffer_size;
	p_data->q_params.rq_buffer_size = cpu_to_le16(tmp);

	if (fcoe_pf_params->is_target) {
		SET_FIELD(p_data->q_params.q_validity,
			  SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
		if (p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA])
			SET_FIELD(p_data->q_params.q_validity,
				  SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1);
		SET_FIELD(p_data->q_params.q_validity,
			  SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1);
	} else {
		SET_FIELD(p_data->q_params.q_validity,
			  SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
	}

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	return rc;
}

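/* Post the "offload connection" ramrod: copy the connection's queue
 * addresses, MAC/FC addresses and timer values into the ramrod data.
 */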
static int
qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
			 struct qed_fcoe_conn *p_conn,
			 enum spq_mode comp_mode,
			 struct qed_spq_comp_cb *p_comp_addr)
{
	struct fcoe_conn_offload_ramrod_params *p_ramrod = NULL;
	struct fcoe_conn_offload_ramrod_data *p_data;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 physical_q0, tmp;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_OFFLOAD_CONN,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.fcoe_conn_ofld;
	p_data = &p_ramrod->offload_ramrod_data;

	/* Transmission PQ is the first of the PF */
	physical_q0 = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_conn->physical_q0 = physical_q0;
	p_data->physical_q0 = cpu_to_le16(physical_q0);

	p_data->conn_id = cpu_to_le16(p_conn->conn_id);
	DMA_REGPAIR_LE(p_data->sq_pbl_addr, p_conn->sq_pbl_addr);
	DMA_REGPAIR_LE(p_data->sq_curr_page_addr, p_conn->sq_curr_page_addr);
	DMA_REGPAIR_LE(p_data->sq_next_page_addr, p_conn->sq_next_page_addr);
	DMA_REGPAIR_LE(p_data->xferq_pbl_addr, p_conn->xferq_pbl_addr);
	DMA_REGPAIR_LE(p_data->xferq_curr_page_addr, p_conn->xferq_addr[0]);
	DMA_REGPAIR_LE(p_data->xferq_next_page_addr, p_conn->xferq_addr[1]);

	DMA_REGPAIR_LE(p_data->respq_pbl_addr, p_conn->confq_pbl_addr);
	DMA_REGPAIR_LE(p_data->respq_curr_page_addr, p_conn->confq_addr[0]);
	DMA_REGPAIR_LE(p_data->respq_next_page_addr, p_conn->confq_addr[1]);

	p_data->dst_mac_addr_lo = cpu_to_le16(p_conn->dst_mac_addr_lo);
	p_data->dst_mac_addr_mid = cpu_to_le16(p_conn->dst_mac_addr_mid);
	p_data->dst_mac_addr_hi = cpu_to_le16(p_conn->dst_mac_addr_hi);
	p_data->src_mac_addr_lo = cpu_to_le16(p_conn->src_mac_addr_lo);
	p_data->src_mac_addr_mid = cpu_to_le16(p_conn->src_mac_addr_mid);
	p_data->src_mac_addr_hi = cpu_to_le16(p_conn->src_mac_addr_hi);

	tmp = cpu_to_le16(p_conn->tx_max_fc_pay_len);
	p_data->tx_max_fc_pay_len = tmp;
	tmp = cpu_to_le16(p_conn->e_d_tov_timer_val);
	p_data->e_d_tov_timer_val = tmp;
	tmp = cpu_to_le16(p_conn->rec_tov_timer_val);
	p_data->rec_rr_tov_timer_val = tmp;
	tmp = cpu_to_le16(p_conn->rx_max_fc_pay_len);
	p_data->rx_max_fc_pay_len = tmp;

	p_data->vlan_tag = cpu_to_le16(p_conn->vlan_tag);
	p_data->s_id.addr_hi = p_conn->s_id.addr_hi;
	p_data->s_id.addr_mid = p_conn->s_id.addr_mid;
	p_data->s_id.addr_lo = p_conn->s_id.addr_lo;
	p_data->max_conc_seqs_c3 = p_conn->max_conc_seqs_c3;
	p_data->d_id.addr_hi = p_conn->d_id.addr_hi;
	p_data->d_id.addr_mid = p_conn->d_id.addr_mid;
	p_data->d_id.addr_lo = p_conn->d_id.addr_lo;
	p_data->flags = p_conn->flags;
	p_data->def_q_idx = p_conn->def_q_idx;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

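/* Post the "terminate connection" ramrod for an offloaded connection. */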
static int
qed_sp_fcoe_conn_destroy(struct qed_hwfn *p_hwfn,
			 struct qed_fcoe_conn *p_conn,
			 enum spq_mode comp_mode,
			 struct qed_spq_comp_cb *p_comp_addr)
{
	struct fcoe_conn_terminate_ramrod_params *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_TERMINATE_CONN,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.fcoe_conn_terminate;
	DMA_REGPAIR_LE(p_ramrod->terminate_ramrod_data.terminate_params_addr,
		       p_conn->terminate_params);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

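/* Clear the FCoE TID segment in the TM task-enable register and post the
 * "destroy function" ramrod on the dummy CID reserved at init time.
 */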
static int
qed_sp_fcoe_func_stop(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      enum spq_mode comp_mode,
		      struct qed_spq_comp_cb *p_comp_addr)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u32 active_segs = 0;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_hwfn->pf_params.fcoe_pf_params.dummy_icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	active_segs = qed_rd(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK);
	active_segs &= ~BIT(QED_CXT_FCOE_TID_SEG);
	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, active_segs);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

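/* Reuse a connection from the free list if one is available; otherwise
 * allocate a new one together with its XFERQ/CONFQ PBLs and queue pages.
 */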
static int
qed_fcoe_allocate_connection(struct qed_hwfn *p_hwfn,
			     struct qed_fcoe_conn **p_out_conn)
{
	struct qed_fcoe_conn *p_conn = NULL;
	void *p_addr;
	u32 i;

	spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
	if (!list_empty(&p_hwfn->p_fcoe_info->free_list))
		p_conn =
		    list_first_entry(&p_hwfn->p_fcoe_info->free_list,
				     struct qed_fcoe_conn, list_entry);
	if (p_conn) {
		list_del(&p_conn->list_entry);
		spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
		*p_out_conn = p_conn;
		return 0;
	}
	spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);

	p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
	if (!p_conn)
		return -ENOMEM;

	p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    QED_CHAIN_PAGE_SIZE,
				    &p_conn->xferq_pbl_addr, GFP_KERNEL);
	if (!p_addr)
		goto nomem_pbl_xferq;
	p_conn->xferq_pbl_addr_virt_addr = p_addr;

	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
		p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_conn->xferq_addr[i], GFP_KERNEL);
		if (!p_addr)
			goto nomem_xferq;
		p_conn->xferq_addr_virt_addr[i] = p_addr;

		p_addr = p_conn->xferq_pbl_addr_virt_addr;
		((dma_addr_t *)p_addr)[i] = p_conn->xferq_addr[i];
	}

	p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    QED_CHAIN_PAGE_SIZE,
				    &p_conn->confq_pbl_addr, GFP_KERNEL);
	if (!p_addr)
		goto nomem_xferq;
	p_conn->confq_pbl_addr_virt_addr = p_addr;

	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
		p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_conn->confq_addr[i], GFP_KERNEL);
		if (!p_addr)
			goto nomem_confq;
		p_conn->confq_addr_virt_addr[i] = p_addr;

		p_addr = p_conn->confq_pbl_addr_virt_addr;
		((dma_addr_t *)p_addr)[i] = p_conn->confq_addr[i];
	}

	p_conn->free_on_delete = true;
	*p_out_conn = p_conn;
	return 0;

nomem_confq:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  QED_CHAIN_PAGE_SIZE,
			  p_conn->confq_pbl_addr_virt_addr,
			  p_conn->confq_pbl_addr);
	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++)
		if (p_conn->confq_addr_virt_addr[i])
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  QED_CHAIN_PAGE_SIZE,
					  p_conn->confq_addr_virt_addr[i],
					  p_conn->confq_addr[i]);
nomem_xferq:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  QED_CHAIN_PAGE_SIZE,
			  p_conn->xferq_pbl_addr_virt_addr,
			  p_conn->xferq_pbl_addr);
	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++)
		if (p_conn->xferq_addr_virt_addr[i])
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  QED_CHAIN_PAGE_SIZE,
					  p_conn->xferq_addr_virt_addr[i],
					  p_conn->xferq_addr[i]);
nomem_pbl_xferq:
	kfree(p_conn);
	return -ENOMEM;
}

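/* Free a connection's XFERQ/CONFQ PBLs and queue pages, then the
 * connection structure itself.
 */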
static void qed_fcoe_free_connection(struct qed_hwfn *p_hwfn,
				     struct qed_fcoe_conn *p_conn)
{
	u32 i;

	if (!p_conn)
		return;

	if (p_conn->confq_pbl_addr_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->confq_pbl_addr_virt_addr,
				  p_conn->confq_pbl_addr);

	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
		if (!p_conn->confq_addr_virt_addr[i])
			continue;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->confq_addr_virt_addr[i],
				  p_conn->confq_addr[i]);
	}

	if (p_conn->xferq_pbl_addr_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->xferq_pbl_addr_virt_addr,
				  p_conn->xferq_pbl_addr);

	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
		if (!p_conn->xferq_addr_virt_addr[i])
			continue;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->xferq_addr_virt_addr[i],
				  p_conn->xferq_addr[i]);
	}
	kfree(p_conn);
}

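/* Return the doorbell BAR address for the given connection CID. */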
static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
{
	return (u8 __iomem *)p_hwfn->doorbells +
	       qed_db_addr(cid, DQ_DEMS_LEGACY);
}

static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
						   u8 bdq_id)
{
	if (RESC_NUM(p_hwfn, QED_BDQ)) {
		return (u8 __iomem *)p_hwfn->regview +
		       GTT_BAR0_MAP_REG_MSDM_RAM +
		       MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
								  QED_BDQ),
						       bdq_id);
	} else {
		DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
		return NULL;
	}
}

static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
						     u8 bdq_id)
{
	if (RESC_NUM(p_hwfn, QED_BDQ)) {
		return (u8 __iomem *)p_hwfn->regview +
		       GTT_BAR0_MAP_REG_TSDM_RAM +
		       TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
								  QED_BDQ),
						       bdq_id);
	} else {
		DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
		return NULL;
	}
}

struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_fcoe_info *p_fcoe_info;

	/* Allocate the FCoE info struct */
	p_fcoe_info = kzalloc(sizeof(*p_fcoe_info), GFP_KERNEL);
	if (!p_fcoe_info) {
		DP_NOTICE(p_hwfn, "Failed to allocate qed_fcoe_info\n");
		return NULL;
	}
	INIT_LIST_HEAD(&p_fcoe_info->free_list);
	return p_fcoe_info;
}

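/* Initialize the FCoE info lock and pre-program the timer-valid and
 * connection-type bits in every FCoE task context.
 */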
void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info)
{
	struct fcoe_task_context *p_task_ctx = NULL;
	int rc;
	u32 i;

	spin_lock_init(&p_fcoe_info->lock);
	for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) {
		rc = qed_cxt_get_task_ctx(p_hwfn, i,
					  QED_CTX_WORKING_MEM,
					  (void **)&p_task_ctx);
		if (rc)
			continue;

		memset(p_task_ctx, 0, sizeof(struct fcoe_task_context));
		SET_FIELD(p_task_ctx->timer_context.logical_client_0,
			  TIMERS_CONTEXT_VALIDLC0, 1);
		SET_FIELD(p_task_ctx->timer_context.logical_client_1,
			  TIMERS_CONTEXT_VALIDLC1, 1);
		SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
			  TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
	}
}

void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info)
{
	struct qed_fcoe_conn *p_conn = NULL;

	if (!p_fcoe_info)
		return;

	while (!list_empty(&p_fcoe_info->free_list)) {
		p_conn = list_first_entry(&p_fcoe_info->free_list,
					  struct qed_fcoe_conn, list_entry);
		if (!p_conn)
			break;
		list_del(&p_conn->list_entry);
		qed_fcoe_free_connection(p_hwfn, p_conn);
	}

	kfree(p_fcoe_info);
}

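/* Acquire a CID for a new connection and pair it with either the caller's
 * connection structure or a freshly allocated one.
 */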
static int
qed_fcoe_acquire_connection(struct qed_hwfn *p_hwfn,
			    struct qed_fcoe_conn *p_in_conn,
			    struct qed_fcoe_conn **p_out_conn)
{
	struct qed_fcoe_conn *p_conn = NULL;
	int rc = 0;
	u32 icid;

	spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &icid);
	spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
	if (rc)
		return rc;

	/* Use input connection [if provided] or allocate a new one */
	if (p_in_conn) {
		p_conn = p_in_conn;
	} else {
		rc = qed_fcoe_allocate_connection(p_hwfn, &p_conn);
		if (rc) {
			spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
			qed_cxt_release_cid(p_hwfn, icid);
			spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
			return rc;
		}
	}

	p_conn->icid = icid;
	p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
	*p_out_conn = p_conn;

	return rc;
}

static void qed_fcoe_release_connection(struct qed_hwfn *p_hwfn,
					struct qed_fcoe_conn *p_conn)
{
	spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
	list_add_tail(&p_conn->list_entry, &p_hwfn->p_fcoe_info->free_list);
	qed_cxt_release_cid(p_hwfn, p_conn->icid);
	spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
}

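/* Read the per-PF FCoE Rx statistics from Tstorm RAM. */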
static void _qed_fcoe_get_tstats(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_fcoe_stats *p_stats)
{
	struct fcoe_rx_stat tstats;
	u32 tstats_addr;

	memset(&tstats, 0, sizeof(tstats));
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
	    TSTORM_FCOE_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

	p_stats->fcoe_rx_byte_cnt = HILO_64_REGPAIR(tstats.fcoe_rx_byte_cnt);
	p_stats->fcoe_rx_data_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_data_pkt_cnt);
	p_stats->fcoe_rx_xfer_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_xfer_pkt_cnt);
	p_stats->fcoe_rx_other_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_other_pkt_cnt);

	p_stats->fcoe_silent_drop_pkt_cmdq_full_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_cmdq_full_cnt);
	p_stats->fcoe_silent_drop_pkt_rq_full_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_rq_full_cnt);
	p_stats->fcoe_silent_drop_pkt_crc_error_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_crc_error_cnt);
	p_stats->fcoe_silent_drop_pkt_task_invalid_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_task_invalid_cnt);
	p_stats->fcoe_silent_drop_total_pkt_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_total_pkt_cnt);
}

static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_fcoe_stats *p_stats)
{
	struct fcoe_tx_stat pstats;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
	    PSTORM_FCOE_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->fcoe_tx_byte_cnt = HILO_64_REGPAIR(pstats.fcoe_tx_byte_cnt);
	p_stats->fcoe_tx_data_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_data_pkt_cnt);
	p_stats->fcoe_tx_xfer_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_xfer_pkt_cnt);
	p_stats->fcoe_tx_other_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_other_pkt_cnt);
}

static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn,
			      struct qed_fcoe_stats *p_stats)
{
	struct qed_ptt *p_ptt;

	memset(p_stats, 0, sizeof(*p_stats));

	p_ptt = qed_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	_qed_fcoe_get_tstats(p_hwfn, p_ptt, p_stats);
	_qed_fcoe_get_pstats(p_hwfn, p_ptt, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

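/* Hash-table node mapping a connection handle (the icid) to its
 * qed_fcoe_conn.
 */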
struct qed_hash_fcoe_con {
	struct hlist_node node;
	struct qed_fcoe_conn *con;
};

static int qed_fill_fcoe_dev_info(struct qed_dev *cdev,
				  struct qed_dev_fcoe_info *info)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	int rc;

	memset(info, 0, sizeof(*info));
	rc = qed_fill_dev_info(cdev, &info->common);

	info->primary_dbq_rq_addr =
	    qed_fcoe_get_primary_bdq_prod(hwfn, BDQ_ID_RQ);
	info->secondary_bdq_rq_addr =
	    qed_fcoe_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);

	return rc;
}

static void qed_register_fcoe_ops(struct qed_dev *cdev,
				  struct qed_fcoe_cb_ops *ops, void *cookie)
{
	cdev->protocol_ops.fcoe = ops;
	cdev->ops_cookie = cookie;
}

static struct qed_hash_fcoe_con *qed_fcoe_get_hash(struct qed_dev *cdev,
						   u32 handle)
{
	struct qed_hash_fcoe_con *hash_con = NULL;

	if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
		return NULL;

	hash_for_each_possible(cdev->connections, hash_con, node, handle) {
		if (hash_con->con->icid == handle)
			break;
	}

	if (!hash_con || (hash_con->con->icid != handle))
		return NULL;

	return hash_con;
}

static int qed_fcoe_stop(struct qed_dev *cdev)
{
	struct qed_ptt *p_ptt;
	int rc;

	if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
		DP_NOTICE(cdev, "fcoe already stopped\n");
		return 0;
	}

	if (!hash_empty(cdev->connections)) {
		DP_NOTICE(cdev,
			  "Can't stop fcoe - not all connections were returned\n");
		return -EINVAL;
	}

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt)
		return -EAGAIN;

	/* Stop the fcoe */
	rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev), p_ptt,
				   QED_SPQ_MODE_EBLOCK, NULL);
	cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);

	return rc;
}

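/* Start the FCoE function on the leading hwfn and, if requested, report
 * the task (TID) memory layout back to the caller.
 */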
static int qed_fcoe_start(struct qed_dev *cdev, struct qed_fcoe_tid *tasks)
{
	int rc;

	if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
		DP_NOTICE(cdev, "fcoe already started\n");
		return 0;
	}

	rc = qed_sp_fcoe_func_start(QED_LEADING_HWFN(cdev),
				    QED_SPQ_MODE_EBLOCK, NULL);
	if (rc) {
		DP_NOTICE(cdev, "Failed to start fcoe\n");
		return rc;
	}

	cdev->flags |= QED_FLAG_STORAGE_STARTED;
	hash_init(cdev->connections);

	if (tasks) {
		struct qed_tid_mem *tid_info = kzalloc(sizeof(*tid_info),
						       GFP_ATOMIC);

		if (!tid_info) {
			DP_NOTICE(cdev,
				  "Failed to allocate tasks information\n");
			qed_fcoe_stop(cdev);
			return -ENOMEM;
		}

		rc = qed_cxt_get_tid_mem_info(QED_LEADING_HWFN(cdev), tid_info);
		if (rc) {
			DP_NOTICE(cdev, "Failed to gather task information\n");
			qed_fcoe_stop(cdev);
			kfree(tid_info);
			return rc;
		}

		/* Fill task information */
		tasks->size = tid_info->tid_size;
		tasks->num_tids_per_block = tid_info->num_tids_per_block;
		memcpy(tasks->blocks, tid_info->blocks,
		       MAX_TID_BLOCKS_FCOE * sizeof(u8 *));

		kfree(tid_info);
	}

	return 0;
}

static int qed_fcoe_acquire_conn(struct qed_dev *cdev,
				 u32 *handle,
				 u32 *fw_cid, void __iomem **p_doorbell)
{
	struct qed_hash_fcoe_con *hash_con;
	int rc;

	/* Allocate a hashed connection */
	hash_con = kzalloc(sizeof(*hash_con), GFP_KERNEL);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to allocate hashed connection\n");
		return -ENOMEM;
	}

	/* Acquire the connection */
	rc = qed_fcoe_acquire_connection(QED_LEADING_HWFN(cdev), NULL,
					 &hash_con->con);
	if (rc) {
		DP_NOTICE(cdev, "Failed to acquire Connection\n");
		kfree(hash_con);
		return rc;
	}

	/* Add the connection to the hash table */
	*handle = hash_con->con->icid;
	*fw_cid = hash_con->con->fw_cid;
	hash_add(cdev->connections, &hash_con->node, *handle);

	if (p_doorbell)
		*p_doorbell = qed_fcoe_get_db_addr(QED_LEADING_HWFN(cdev),
						   *handle);

	return 0;
}

static int qed_fcoe_release_conn(struct qed_dev *cdev, u32 handle)
{
	struct qed_hash_fcoe_con *hash_con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	hlist_del(&hash_con->node);
	qed_fcoe_release_connection(QED_LEADING_HWFN(cdev), hash_con->con);
	kfree(hash_con);

	return 0;
}

static int qed_fcoe_offload_conn(struct qed_dev *cdev,
				 u32 handle,
				 struct qed_fcoe_params_offload *conn_info)
{
	struct qed_hash_fcoe_con *hash_con;
	struct qed_fcoe_conn *con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	/* Update the connection with information from the params */
	con = hash_con->con;

	con->sq_pbl_addr = conn_info->sq_pbl_addr;
	con->sq_curr_page_addr = conn_info->sq_curr_page_addr;
	con->sq_next_page_addr = conn_info->sq_next_page_addr;
	con->tx_max_fc_pay_len = conn_info->tx_max_fc_pay_len;
	con->e_d_tov_timer_val = conn_info->e_d_tov_timer_val;
	con->rec_tov_timer_val = conn_info->rec_tov_timer_val;
	con->rx_max_fc_pay_len = conn_info->rx_max_fc_pay_len;
	con->vlan_tag = conn_info->vlan_tag;
	con->max_conc_seqs_c3 = conn_info->max_conc_seqs_c3;
	con->flags = conn_info->flags;
	con->def_q_idx = conn_info->def_q_idx;

	con->src_mac_addr_hi = (conn_info->src_mac[5] << 8) |
	    conn_info->src_mac[4];
	con->src_mac_addr_mid = (conn_info->src_mac[3] << 8) |
	    conn_info->src_mac[2];
	con->src_mac_addr_lo = (conn_info->src_mac[1] << 8) |
	    conn_info->src_mac[0];
	con->dst_mac_addr_hi = (conn_info->dst_mac[5] << 8) |
	    conn_info->dst_mac[4];
	con->dst_mac_addr_mid = (conn_info->dst_mac[3] << 8) |
	    conn_info->dst_mac[2];
	con->dst_mac_addr_lo = (conn_info->dst_mac[1] << 8) |
	    conn_info->dst_mac[0];

	con->s_id.addr_hi = conn_info->s_id.addr_hi;
	con->s_id.addr_mid = conn_info->s_id.addr_mid;
	con->s_id.addr_lo = conn_info->s_id.addr_lo;
	con->d_id.addr_hi = conn_info->d_id.addr_hi;
	con->d_id.addr_mid = conn_info->d_id.addr_mid;
	con->d_id.addr_lo = conn_info->d_id.addr_lo;

	return qed_sp_fcoe_conn_offload(QED_LEADING_HWFN(cdev), con,
					QED_SPQ_MODE_EBLOCK, NULL);
}

static int qed_fcoe_destroy_conn(struct qed_dev *cdev,
				 u32 handle, dma_addr_t terminate_params)
{
	struct qed_hash_fcoe_con *hash_con;
	struct qed_fcoe_conn *con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	/* Update the connection with information from the params */
	con = hash_con->con;
	con->terminate_params = terminate_params;

	return qed_sp_fcoe_conn_destroy(QED_LEADING_HWFN(cdev), con,
					QED_SPQ_MODE_EBLOCK, NULL);
}

static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats)
{
	return qed_fcoe_get_stats(QED_LEADING_HWFN(cdev), stats);
}

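/* Aggregate the FCoE FW counters into the management FW statistics and,
 * where available, let the protocol driver report login failures.
 */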
void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
				 struct qed_mcp_fcoe_stats *stats)
{
	struct qed_fcoe_stats proto_stats;

	/* Retrieve FW statistics */
	memset(&proto_stats, 0, sizeof(proto_stats));
	if (qed_fcoe_stats(cdev, &proto_stats)) {
		DP_VERBOSE(cdev, QED_MSG_STORAGE,
			   "Failed to collect FCoE statistics\n");
		return;
	}

	/* Translate FW statistics into struct */
	stats->rx_pkts = proto_stats.fcoe_rx_data_pkt_cnt +
			 proto_stats.fcoe_rx_xfer_pkt_cnt +
			 proto_stats.fcoe_rx_other_pkt_cnt;
	stats->tx_pkts = proto_stats.fcoe_tx_data_pkt_cnt +
			 proto_stats.fcoe_tx_xfer_pkt_cnt +
			 proto_stats.fcoe_tx_other_pkt_cnt;
	stats->fcs_err = proto_stats.fcoe_silent_drop_pkt_crc_error_cnt;

	/* Request protocol driver to fill-in the rest */
	if (cdev->protocol_ops.fcoe && cdev->ops_cookie) {
		struct qed_fcoe_cb_ops *ops = cdev->protocol_ops.fcoe;
		void *cookie = cdev->ops_cookie;

		if (ops->get_login_failures)
			stats->login_failure = ops->get_login_failures(cookie);
	}
}

static const struct qed_fcoe_ops qed_fcoe_ops_pass = {
	.common = &qed_common_ops_pass,
	.ll2 = &qed_ll2_ops_pass,
	.fill_dev_info = &qed_fill_fcoe_dev_info,
	.start = &qed_fcoe_start,
	.stop = &qed_fcoe_stop,
	.register_ops = &qed_register_fcoe_ops,
	.acquire_conn = &qed_fcoe_acquire_conn,
	.release_conn = &qed_fcoe_release_conn,
	.offload_conn = &qed_fcoe_offload_conn,
	.destroy_conn = &qed_fcoe_destroy_conn,
	.get_stats = &qed_fcoe_stats,
};

const struct qed_fcoe_ops *qed_get_fcoe_ops(void)
{
	return &qed_fcoe_ops_pass;
}
EXPORT_SYMBOL(qed_get_fcoe_ops);

void qed_put_fcoe_ops(void)
{
}
EXPORT_SYMBOL(qed_put_fcoe_ops);