1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015-2017  QLogic Corporation
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and /or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/types.h>
34 #include <asm/byteorder.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/if_vlan.h>
37 #include <linux/kernel.h>
38 #include <linux/pci.h>
39 #include <linux/slab.h>
40 #include <linux/stddef.h>
41 #include <linux/workqueue.h>
42 #include <net/ipv6.h>
43 #include <linux/bitops.h>
44 #include <linux/delay.h>
45 #include <linux/errno.h>
46 #include <linux/etherdevice.h>
47 #include <linux/io.h>
48 #include <linux/list.h>
49 #include <linux/mutex.h>
50 #include <linux/spinlock.h>
51 #include <linux/string.h>
52 #include <linux/qed/qed_ll2_if.h>
53 #include "qed.h"
54 #include "qed_cxt.h"
55 #include "qed_dev_api.h"
56 #include "qed_hsi.h"
57 #include "qed_hw.h"
58 #include "qed_int.h"
59 #include "qed_ll2.h"
60 #include "qed_mcp.h"
61 #include "qed_ooo.h"
62 #include "qed_reg_addr.h"
63 #include "qed_sp.h"
64 #include "qed_rdma.h"
65 
66 #define QED_LL2_RX_REGISTERED(ll2)	((ll2)->rx_queue.b_cb_registered)
67 #define QED_LL2_TX_REGISTERED(ll2)	((ll2)->tx_queue.b_cb_registered)
68 
69 #define QED_LL2_TX_SIZE (256)
70 #define QED_LL2_RX_SIZE (4096)
71 
72 struct qed_cb_ll2_info {
73 	int rx_cnt;
74 	u32 rx_size;
75 	u8 handle;
76 
77 	/* Lock protecting LL2 buffer lists in sleepless context */
78 	spinlock_t lock;
79 	struct list_head list;
80 
81 	const struct qed_ll2_cb_ops *cbs;
82 	void *cb_cookie;
83 };
84 
85 struct qed_ll2_buffer {
86 	struct list_head list;
87 	void *data;
88 	dma_addr_t phys_addr;
89 };
90 
91 static void qed_ll2b_complete_tx_packet(void *cxt,
92 					u8 connection_handle,
93 					void *cookie,
94 					dma_addr_t first_frag_addr,
95 					bool b_last_fragment,
96 					bool b_last_packet)
97 {
98 	struct qed_hwfn *p_hwfn = cxt;
99 	struct qed_dev *cdev = p_hwfn->cdev;
100 	struct sk_buff *skb = cookie;
101 
102 	/* All we need to do is release the mapping */
103 	dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
104 			 skb_headlen(skb), DMA_TO_DEVICE);
105 
106 	if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
107 		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
108 				      b_last_fragment);
109 
110 	dev_kfree_skb_any(skb);
111 }
112 
113 static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
114 				u8 **data, dma_addr_t *phys_addr)
115 {
116 	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
117 	if (!(*data)) {
118 		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
119 		return -ENOMEM;
120 	}
121 
122 	*phys_addr = dma_map_single(&cdev->pdev->dev,
123 				    ((*data) + NET_SKB_PAD),
124 				    cdev->ll2->rx_size, DMA_FROM_DEVICE);
125 	if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
126 		DP_INFO(cdev, "Failed to map LL2 buffer data\n");
127 		kfree((*data));
128 		return -ENOMEM;
129 	}
130 
131 	return 0;
132 }
133 
134 static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
135 				 struct qed_ll2_buffer *buffer)
136 {
137 	spin_lock_bh(&cdev->ll2->lock);
138 
139 	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
140 			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
141 	kfree(buffer->data);
142 	list_del(&buffer->list);
143 
144 	cdev->ll2->rx_cnt--;
145 	if (!cdev->ll2->rx_cnt)
146 		DP_INFO(cdev, "All LL2 entries were removed\n");
147 
148 	spin_unlock_bh(&cdev->ll2->lock);
149 
150 	return 0;
151 }
152 
153 static void qed_ll2_kill_buffers(struct qed_dev *cdev)
154 {
155 	struct qed_ll2_buffer *buffer, *tmp_buffer;
156 
157 	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
158 		qed_ll2_dealloc_buffer(cdev, buffer);
159 }
160 
161 static void qed_ll2b_complete_rx_packet(void *cxt,
162 					struct qed_ll2_comp_rx_data *data)
163 {
164 	struct qed_hwfn *p_hwfn = cxt;
165 	struct qed_ll2_buffer *buffer = data->cookie;
166 	struct qed_dev *cdev = p_hwfn->cdev;
167 	dma_addr_t new_phys_addr;
168 	struct sk_buff *skb;
169 	bool reuse = false;
170 	int rc = -EINVAL;
171 	u8 *new_data;
172 
173 	DP_VERBOSE(p_hwfn,
174 		   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
175 		   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
176 		   (u64)data->rx_buf_addr,
177 		   data->u.placement_offset,
178 		   data->length.packet_length,
179 		   data->parse_flags,
180 		   data->vlan, data->opaque_data_0, data->opaque_data_1);
181 
182 	if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
183 		print_hex_dump(KERN_INFO, "",
184 			       DUMP_PREFIX_OFFSET, 16, 1,
185 			       buffer->data, data->length.packet_length, false);
186 	}
187 
188 	/* Determine if data is valid */
189 	if (data->length.packet_length < ETH_HLEN)
190 		reuse = true;
191 
192 	/* Allocate a replacement for buffer; Reuse upon failure */
193 	if (!reuse)
194 		rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
195 					  &new_phys_addr);
196 
197 	/* If need to reuse or there's no replacement buffer, repost this */
198 	if (rc)
199 		goto out_post;
200 	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
201 			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
202 
203 	skb = build_skb(buffer->data, 0);
204 	if (!skb) {
205 		DP_INFO(cdev, "Failed to build SKB\n");
206 		kfree(buffer->data);
207 		goto out_post1;
208 	}
209 
210 	data->u.placement_offset += NET_SKB_PAD;
211 	skb_reserve(skb, data->u.placement_offset);
212 	skb_put(skb, data->length.packet_length);
213 	skb_checksum_none_assert(skb);
214 
215 	/* Get parital ethernet information instead of eth_type_trans(),
216 	 * Since we don't have an associated net_device.
217 	 */
218 	skb_reset_mac_header(skb);
219 	skb->protocol = eth_hdr(skb)->h_proto;
220 
221 	/* Pass SKB onward */
222 	if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
223 		if (data->vlan)
224 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
225 					       data->vlan);
226 		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
227 				      data->opaque_data_0,
228 				      data->opaque_data_1);
229 	} else {
230 		DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA |
231 				    QED_MSG_LL2 | QED_MSG_STORAGE),
232 			   "Dropping the packet\n");
233 		kfree(buffer->data);
234 	}
235 
236 out_post1:
237 	/* Update Buffer information and update FW producer */
238 	buffer->data = new_data;
239 	buffer->phys_addr = new_phys_addr;
240 
241 out_post:
242 	rc = qed_ll2_post_rx_buffer(p_hwfn, cdev->ll2->handle,
243 				    buffer->phys_addr, 0, buffer, 1);
244 	if (rc)
245 		qed_ll2_dealloc_buffer(cdev, buffer);
246 }
247 
248 static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
249 						    u8 connection_handle,
250 						    bool b_lock,
251 						    bool b_only_active)
252 {
253 	struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;
254 
255 	if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
256 		return NULL;
257 
258 	if (!p_hwfn->p_ll2_info)
259 		return NULL;
260 
261 	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
262 
263 	if (b_only_active) {
264 		if (b_lock)
265 			mutex_lock(&p_ll2_conn->mutex);
266 		if (p_ll2_conn->b_active)
267 			p_ret = p_ll2_conn;
268 		if (b_lock)
269 			mutex_unlock(&p_ll2_conn->mutex);
270 	} else {
271 		p_ret = p_ll2_conn;
272 	}
273 
274 	return p_ret;
275 }
276 
277 static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
278 						  u8 connection_handle)
279 {
280 	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
281 }
282 
283 static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
284 						       u8 connection_handle)
285 {
286 	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
287 }
288 
289 static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
290 							   *p_hwfn,
291 							   u8 connection_handle)
292 {
293 	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
294 }
295 
296 static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
297 {
298 	bool b_last_packet = false, b_last_frag = false;
299 	struct qed_ll2_tx_packet *p_pkt = NULL;
300 	struct qed_ll2_info *p_ll2_conn;
301 	struct qed_ll2_tx_queue *p_tx;
302 	unsigned long flags = 0;
303 	dma_addr_t tx_frag;
304 
305 	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
306 	if (!p_ll2_conn)
307 		return;
308 
309 	p_tx = &p_ll2_conn->tx_queue;
310 
311 	spin_lock_irqsave(&p_tx->lock, flags);
312 	while (!list_empty(&p_tx->active_descq)) {
313 		p_pkt = list_first_entry(&p_tx->active_descq,
314 					 struct qed_ll2_tx_packet, list_entry);
315 		if (!p_pkt)
316 			break;
317 
318 		list_del(&p_pkt->list_entry);
319 		b_last_packet = list_empty(&p_tx->active_descq);
320 		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
321 		spin_unlock_irqrestore(&p_tx->lock, flags);
322 		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
323 			struct qed_ooo_buffer *p_buffer;
324 
325 			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
326 			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
327 						p_buffer);
328 		} else {
329 			p_tx->cur_completing_packet = *p_pkt;
330 			p_tx->cur_completing_bd_idx = 1;
331 			b_last_frag =
332 				p_tx->cur_completing_bd_idx == p_pkt->bd_used;
333 			tx_frag = p_pkt->bds_set[0].tx_frag;
334 			p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie,
335 						      p_ll2_conn->my_id,
336 						      p_pkt->cookie,
337 						      tx_frag,
338 						      b_last_frag,
339 						      b_last_packet);
340 		}
341 		spin_lock_irqsave(&p_tx->lock, flags);
342 	}
343 	spin_unlock_irqrestore(&p_tx->lock, flags);
344 }
345 
346 static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
347 {
348 	struct qed_ll2_info *p_ll2_conn = p_cookie;
349 	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
350 	u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
351 	struct qed_ll2_tx_packet *p_pkt;
352 	bool b_last_frag = false;
353 	unsigned long flags;
354 	int rc = -EINVAL;
355 
356 	spin_lock_irqsave(&p_tx->lock, flags);
357 	if (p_tx->b_completing_packet) {
358 		rc = -EBUSY;
359 		goto out;
360 	}
361 
362 	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
363 	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
364 	while (num_bds) {
365 		if (list_empty(&p_tx->active_descq))
366 			goto out;
367 
368 		p_pkt = list_first_entry(&p_tx->active_descq,
369 					 struct qed_ll2_tx_packet, list_entry);
370 		if (!p_pkt)
371 			goto out;
372 
373 		p_tx->b_completing_packet = true;
374 		p_tx->cur_completing_packet = *p_pkt;
375 		num_bds_in_packet = p_pkt->bd_used;
376 		list_del(&p_pkt->list_entry);
377 
378 		if (num_bds < num_bds_in_packet) {
379 			DP_NOTICE(p_hwfn,
380 				  "Rest of BDs does not cover whole packet\n");
381 			goto out;
382 		}
383 
384 		num_bds -= num_bds_in_packet;
385 		p_tx->bds_idx += num_bds_in_packet;
386 		while (num_bds_in_packet--)
387 			qed_chain_consume(&p_tx->txq_chain);
388 
389 		p_tx->cur_completing_bd_idx = 1;
390 		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
391 		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
392 
393 		spin_unlock_irqrestore(&p_tx->lock, flags);
394 
395 		p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie,
396 					   p_ll2_conn->my_id,
397 					   p_pkt->cookie,
398 					   p_pkt->bds_set[0].tx_frag,
399 					   b_last_frag, !num_bds);
400 
401 		spin_lock_irqsave(&p_tx->lock, flags);
402 	}
403 
404 	p_tx->b_completing_packet = false;
405 	rc = 0;
406 out:
407 	spin_unlock_irqrestore(&p_tx->lock, flags);
408 	return rc;
409 }
410 
411 static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
412 				  union core_rx_cqe_union *p_cqe,
413 				  struct qed_ll2_comp_rx_data *data)
414 {
415 	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
416 	data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
417 	data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
418 	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
419 	data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
420 	data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
421 	data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id);
422 
423 	data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp);
424 }
425 
426 static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
427 				  union core_rx_cqe_union *p_cqe,
428 				  struct qed_ll2_comp_rx_data *data)
429 {
430 	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags);
431 	data->err_flags = le16_to_cpu(p_cqe->rx_cqe_fp.err_flags.flags);
432 	data->length.packet_length =
433 	    le16_to_cpu(p_cqe->rx_cqe_fp.packet_length);
434 	data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan);
435 	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]);
436 	data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]);
437 	data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset;
438 }
439 
440 static int
441 qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn,
442 			struct qed_ll2_info *p_ll2_conn,
443 			union core_rx_cqe_union *p_cqe,
444 			unsigned long *p_lock_flags)
445 {
446 	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
447 	struct core_rx_slow_path_cqe *sp_cqe;
448 
449 	sp_cqe = &p_cqe->rx_cqe_sp;
450 	if (sp_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) {
451 		DP_NOTICE(p_hwfn,
452 			  "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n",
453 			  sp_cqe->ramrod_cmd_id);
454 		return -EINVAL;
455 	}
456 
457 	if (!p_ll2_conn->cbs.slowpath_cb) {
458 		DP_NOTICE(p_hwfn,
459 			  "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n");
460 		return -EINVAL;
461 	}
462 
463 	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
464 
465 	p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie,
466 				    p_ll2_conn->my_id,
467 				    le32_to_cpu(sp_cqe->opaque_data.data[0]),
468 				    le32_to_cpu(sp_cqe->opaque_data.data[1]));
469 
470 	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
471 
472 	return 0;
473 }
474 
475 static int
476 qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
477 			      struct qed_ll2_info *p_ll2_conn,
478 			      union core_rx_cqe_union *p_cqe,
479 			      unsigned long *p_lock_flags, bool b_last_cqe)
480 {
481 	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
482 	struct qed_ll2_rx_packet *p_pkt = NULL;
483 	struct qed_ll2_comp_rx_data data;
484 
485 	if (!list_empty(&p_rx->active_descq))
486 		p_pkt = list_first_entry(&p_rx->active_descq,
487 					 struct qed_ll2_rx_packet, list_entry);
488 	if (!p_pkt) {
489 		DP_NOTICE(p_hwfn,
490 			  "[%d] LL2 Rx completion but active_descq is empty\n",
491 			  p_ll2_conn->input.conn_type);
492 
493 		return -EIO;
494 	}
495 	list_del(&p_pkt->list_entry);
496 
497 	if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR)
498 		qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
499 	else
500 		qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data);
501 	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
502 		DP_NOTICE(p_hwfn,
503 			  "Mismatch between active_descq and the LL2 Rx chain\n");
504 
505 	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
506 
507 	data.connection_handle = p_ll2_conn->my_id;
508 	data.cookie = p_pkt->cookie;
509 	data.rx_buf_addr = p_pkt->rx_buf_addr;
510 	data.b_last_packet = b_last_cqe;
511 
512 	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
513 	p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data);
514 
515 	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
516 
517 	return 0;
518 }
519 
520 static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
521 {
522 	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie;
523 	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
524 	union core_rx_cqe_union *cqe = NULL;
525 	u16 cq_new_idx = 0, cq_old_idx = 0;
526 	unsigned long flags = 0;
527 	int rc = 0;
528 
529 	spin_lock_irqsave(&p_rx->lock, flags);
530 	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
531 	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
532 
533 	while (cq_new_idx != cq_old_idx) {
534 		bool b_last_cqe = (cq_new_idx == cq_old_idx);
535 
536 		cqe =
537 		    (union core_rx_cqe_union *)
538 		    qed_chain_consume(&p_rx->rcq_chain);
539 		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
540 
541 		DP_VERBOSE(p_hwfn,
542 			   QED_MSG_LL2,
543 			   "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
544 			   cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);
545 
546 		switch (cqe->rx_cqe_sp.type) {
547 		case CORE_RX_CQE_TYPE_SLOW_PATH:
548 			rc = qed_ll2_handle_slowpath(p_hwfn, p_ll2_conn,
549 						     cqe, &flags);
550 			break;
551 		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
552 		case CORE_RX_CQE_TYPE_REGULAR:
553 			rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn,
554 							   cqe, &flags,
555 							   b_last_cqe);
556 			break;
557 		default:
558 			rc = -EIO;
559 		}
560 	}
561 
562 	spin_unlock_irqrestore(&p_rx->lock, flags);
563 	return rc;
564 }
565 
566 static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
567 {
568 	struct qed_ll2_info *p_ll2_conn = NULL;
569 	struct qed_ll2_rx_packet *p_pkt = NULL;
570 	struct qed_ll2_rx_queue *p_rx;
571 	unsigned long flags = 0;
572 
573 	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
574 	if (!p_ll2_conn)
575 		return;
576 
577 	p_rx = &p_ll2_conn->rx_queue;
578 
579 	spin_lock_irqsave(&p_rx->lock, flags);
580 	while (!list_empty(&p_rx->active_descq)) {
581 		p_pkt = list_first_entry(&p_rx->active_descq,
582 					 struct qed_ll2_rx_packet, list_entry);
583 		if (!p_pkt)
584 			break;
585 		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
586 		spin_unlock_irqrestore(&p_rx->lock, flags);
587 
588 		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
589 			struct qed_ooo_buffer *p_buffer;
590 
591 			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
592 			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
593 						p_buffer);
594 		} else {
595 			dma_addr_t rx_buf_addr = p_pkt->rx_buf_addr;
596 			void *cookie = p_pkt->cookie;
597 			bool b_last;
598 
599 			b_last = list_empty(&p_rx->active_descq);
600 			p_ll2_conn->cbs.rx_release_cb(p_ll2_conn->cbs.cookie,
601 						      p_ll2_conn->my_id,
602 						      cookie,
603 						      rx_buf_addr, b_last);
604 		}
605 		spin_lock_irqsave(&p_rx->lock, flags);
606 	}
607 	spin_unlock_irqrestore(&p_rx->lock, flags);
608 }
609 
610 static bool
611 qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
612 				struct core_rx_slow_path_cqe *p_cqe)
613 {
614 	struct ooo_opaque *iscsi_ooo;
615 	u32 cid;
616 
617 	if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)
618 		return false;
619 
620 	iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data;
621 	if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES)
622 		return false;
623 
624 	/* Need to make a flush */
625 	cid = le32_to_cpu(iscsi_ooo->cid);
626 	qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);
627 
628 	return true;
629 }
630 
631 static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
632 				  struct qed_ll2_info *p_ll2_conn)
633 {
634 	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
635 	u16 packet_length = 0, parse_flags = 0, vlan = 0;
636 	struct qed_ll2_rx_packet *p_pkt = NULL;
637 	u32 num_ooo_add_to_peninsula = 0, cid;
638 	union core_rx_cqe_union *cqe = NULL;
639 	u16 cq_new_idx = 0, cq_old_idx = 0;
640 	struct qed_ooo_buffer *p_buffer;
641 	struct ooo_opaque *iscsi_ooo;
642 	u8 placement_offset = 0;
643 	u8 cqe_type;
644 
645 	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
646 	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
647 	if (cq_new_idx == cq_old_idx)
648 		return 0;
649 
650 	while (cq_new_idx != cq_old_idx) {
651 		struct core_rx_fast_path_cqe *p_cqe_fp;
652 
653 		cqe = qed_chain_consume(&p_rx->rcq_chain);
654 		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
655 		cqe_type = cqe->rx_cqe_sp.type;
656 
657 		if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH)
658 			if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn,
659 							    &cqe->rx_cqe_sp))
660 				continue;
661 
662 		if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
663 			DP_NOTICE(p_hwfn,
664 				  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
665 				  cqe_type);
666 			return -EINVAL;
667 		}
668 		p_cqe_fp = &cqe->rx_cqe_fp;
669 
670 		placement_offset = p_cqe_fp->placement_offset;
671 		parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
672 		packet_length = le16_to_cpu(p_cqe_fp->packet_length);
673 		vlan = le16_to_cpu(p_cqe_fp->vlan);
674 		iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
675 		qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
676 					   iscsi_ooo);
677 		cid = le32_to_cpu(iscsi_ooo->cid);
678 
679 		/* Process delete isle first */
680 		if (iscsi_ooo->drop_size)
681 			qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
682 					     iscsi_ooo->drop_isle,
683 					     iscsi_ooo->drop_size);
684 
685 		if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
686 			continue;
687 
688 		/* Now process create/add/join isles */
689 		if (list_empty(&p_rx->active_descq)) {
690 			DP_NOTICE(p_hwfn,
691 				  "LL2 OOO RX chain has no submitted buffers\n"
692 				  );
693 			return -EIO;
694 		}
695 
696 		p_pkt = list_first_entry(&p_rx->active_descq,
697 					 struct qed_ll2_rx_packet, list_entry);
698 
699 		if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
700 		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
701 		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
702 		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
703 		    (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
704 			if (!p_pkt) {
705 				DP_NOTICE(p_hwfn,
706 					  "LL2 OOO RX packet is not valid\n");
707 				return -EIO;
708 			}
709 			list_del(&p_pkt->list_entry);
710 			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
711 			p_buffer->packet_length = packet_length;
712 			p_buffer->parse_flags = parse_flags;
713 			p_buffer->vlan = vlan;
714 			p_buffer->placement_offset = placement_offset;
715 			qed_chain_consume(&p_rx->rxq_chain);
716 			list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
717 
718 			switch (iscsi_ooo->ooo_opcode) {
719 			case TCP_EVENT_ADD_NEW_ISLE:
720 				qed_ooo_add_new_isle(p_hwfn,
721 						     p_hwfn->p_ooo_info,
722 						     cid,
723 						     iscsi_ooo->ooo_isle,
724 						     p_buffer);
725 				break;
726 			case TCP_EVENT_ADD_ISLE_RIGHT:
727 				qed_ooo_add_new_buffer(p_hwfn,
728 						       p_hwfn->p_ooo_info,
729 						       cid,
730 						       iscsi_ooo->ooo_isle,
731 						       p_buffer,
732 						       QED_OOO_RIGHT_BUF);
733 				break;
734 			case TCP_EVENT_ADD_ISLE_LEFT:
735 				qed_ooo_add_new_buffer(p_hwfn,
736 						       p_hwfn->p_ooo_info,
737 						       cid,
738 						       iscsi_ooo->ooo_isle,
739 						       p_buffer,
740 						       QED_OOO_LEFT_BUF);
741 				break;
742 			case TCP_EVENT_JOIN:
743 				qed_ooo_add_new_buffer(p_hwfn,
744 						       p_hwfn->p_ooo_info,
745 						       cid,
746 						       iscsi_ooo->ooo_isle +
747 						       1,
748 						       p_buffer,
749 						       QED_OOO_LEFT_BUF);
750 				qed_ooo_join_isles(p_hwfn,
751 						   p_hwfn->p_ooo_info,
752 						   cid, iscsi_ooo->ooo_isle);
753 				break;
754 			case TCP_EVENT_ADD_PEN:
755 				num_ooo_add_to_peninsula++;
756 				qed_ooo_put_ready_buffer(p_hwfn,
757 							 p_hwfn->p_ooo_info,
758 							 p_buffer, true);
759 				break;
760 			}
761 		} else {
762 			DP_NOTICE(p_hwfn,
763 				  "Unexpected event (%d) TX OOO completion\n",
764 				  iscsi_ooo->ooo_opcode);
765 		}
766 	}
767 
768 	return 0;
769 }
770 
771 static void
772 qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
773 			  struct qed_ll2_info *p_ll2_conn)
774 {
775 	struct qed_ll2_tx_pkt_info tx_pkt;
776 	struct qed_ooo_buffer *p_buffer;
777 	u16 l4_hdr_offset_w;
778 	dma_addr_t first_frag;
779 	u8 bd_flags;
780 	int rc;
781 
782 	/* Submit Tx buffers here */
783 	while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
784 						    p_hwfn->p_ooo_info))) {
785 		l4_hdr_offset_w = 0;
786 		bd_flags = 0;
787 
788 		first_frag = p_buffer->rx_buffer_phys_addr +
789 			     p_buffer->placement_offset;
790 		SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
791 		SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);
792 
793 		memset(&tx_pkt, 0, sizeof(tx_pkt));
794 		tx_pkt.num_of_bds = 1;
795 		tx_pkt.vlan = p_buffer->vlan;
796 		tx_pkt.bd_flags = bd_flags;
797 		tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
798 		switch (p_ll2_conn->tx_dest) {
799 		case CORE_TX_DEST_NW:
800 			tx_pkt.tx_dest = QED_LL2_TX_DEST_NW;
801 			break;
802 		case CORE_TX_DEST_LB:
803 			tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
804 			break;
805 		case CORE_TX_DEST_DROP:
806 		default:
807 			tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
808 			break;
809 		}
810 		tx_pkt.first_frag = first_frag;
811 		tx_pkt.first_frag_len = p_buffer->packet_length;
812 		tx_pkt.cookie = p_buffer;
813 
814 		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id,
815 					       &tx_pkt, true);
816 		if (rc) {
817 			qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
818 						 p_buffer, false);
819 			break;
820 		}
821 	}
822 }
823 
824 static void
825 qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
826 			  struct qed_ll2_info *p_ll2_conn)
827 {
828 	struct qed_ooo_buffer *p_buffer;
829 	int rc;
830 
831 	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
832 						   p_hwfn->p_ooo_info))) {
833 		rc = qed_ll2_post_rx_buffer(p_hwfn,
834 					    p_ll2_conn->my_id,
835 					    p_buffer->rx_buffer_phys_addr,
836 					    0, p_buffer, true);
837 		if (rc) {
838 			qed_ooo_put_free_buffer(p_hwfn,
839 						p_hwfn->p_ooo_info, p_buffer);
840 			break;
841 		}
842 	}
843 }
844 
845 static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
846 {
847 	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
848 	int rc;
849 
850 	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
851 		return 0;
852 
853 	rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
854 	if (rc)
855 		return rc;
856 
857 	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
858 	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
859 
860 	return 0;
861 }
862 
863 static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
864 {
865 	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
866 	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
867 	struct qed_ll2_tx_packet *p_pkt = NULL;
868 	struct qed_ooo_buffer *p_buffer;
869 	bool b_dont_submit_rx = false;
870 	u16 new_idx = 0, num_bds = 0;
871 	int rc;
872 
873 	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
874 		return 0;
875 
876 	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
877 	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
878 
879 	if (!num_bds)
880 		return 0;
881 
882 	while (num_bds) {
883 		if (list_empty(&p_tx->active_descq))
884 			return -EINVAL;
885 
886 		p_pkt = list_first_entry(&p_tx->active_descq,
887 					 struct qed_ll2_tx_packet, list_entry);
888 		if (!p_pkt)
889 			return -EINVAL;
890 
891 		if (p_pkt->bd_used != 1) {
892 			DP_NOTICE(p_hwfn,
893 				  "Unexpectedly many BDs(%d) in TX OOO completion\n",
894 				  p_pkt->bd_used);
895 			return -EINVAL;
896 		}
897 
898 		list_del(&p_pkt->list_entry);
899 
900 		num_bds--;
901 		p_tx->bds_idx++;
902 		qed_chain_consume(&p_tx->txq_chain);
903 
904 		p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
905 		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
906 
907 		if (b_dont_submit_rx) {
908 			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
909 						p_buffer);
910 			continue;
911 		}
912 
913 		rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
914 					    p_buffer->rx_buffer_phys_addr, 0,
915 					    p_buffer, true);
916 		if (rc != 0) {
917 			qed_ooo_put_free_buffer(p_hwfn,
918 						p_hwfn->p_ooo_info, p_buffer);
919 			b_dont_submit_rx = true;
920 		}
921 	}
922 
923 	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
924 
925 	return 0;
926 }
927 
928 static void qed_ll2_stop_ooo(struct qed_hwfn *p_hwfn)
929 {
930 	u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
931 
932 	DP_VERBOSE(p_hwfn, (QED_MSG_STORAGE | QED_MSG_LL2),
933 		   "Stopping LL2 OOO queue [%02x]\n", *handle);
934 
935 	qed_ll2_terminate_connection(p_hwfn, *handle);
936 	qed_ll2_release_connection(p_hwfn, *handle);
937 	*handle = QED_LL2_UNUSED_HANDLE;
938 }
939 
940 static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
941 				     struct qed_ll2_info *p_ll2_conn,
942 				     u8 action_on_error)
943 {
944 	enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
945 	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
946 	struct core_rx_start_ramrod_data *p_ramrod = NULL;
947 	struct qed_spq_entry *p_ent = NULL;
948 	struct qed_sp_init_data init_data;
949 	u16 cqe_pbl_size;
950 	int rc = 0;
951 
952 	/* Get SPQ entry */
953 	memset(&init_data, 0, sizeof(init_data));
954 	init_data.cid = p_ll2_conn->cid;
955 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
956 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
957 
958 	rc = qed_sp_init_request(p_hwfn, &p_ent,
959 				 CORE_RAMROD_RX_QUEUE_START,
960 				 PROTOCOLID_CORE, &init_data);
961 	if (rc)
962 		return rc;
963 
964 	p_ramrod = &p_ent->ramrod.core_rx_queue_start;
965 	memset(p_ramrod, 0, sizeof(*p_ramrod));
966 	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
967 	p_ramrod->sb_index = p_rx->rx_sb_index;
968 	p_ramrod->complete_event_flg = 1;
969 
970 	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
971 	DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr);
972 	cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
973 	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
974 	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
975 		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));
976 
977 	p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
978 	p_ramrod->inner_vlan_stripping_en =
979 		p_ll2_conn->input.rx_vlan_removal_en;
980 
981 	if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
982 	    p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE)
983 		p_ramrod->report_outer_vlan = 1;
984 	p_ramrod->queue_id = p_ll2_conn->queue_id;
985 	p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;
986 
987 	if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) &&
988 	    p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE &&
989 	    conn_type != QED_LL2_TYPE_IWARP) {
990 		p_ramrod->mf_si_bcast_accept_all = 1;
991 		p_ramrod->mf_si_mcast_accept_all = 1;
992 	} else {
993 		p_ramrod->mf_si_bcast_accept_all = 0;
994 		p_ramrod->mf_si_mcast_accept_all = 0;
995 	}
996 
997 	p_ramrod->action_on_error.error_type = action_on_error;
998 	p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
999 	p_ramrod->zero_prod_flg = 1;
1000 
1001 	return qed_spq_post(p_hwfn, p_ent, NULL);
1002 }
1003 
1004 static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
1005 				     struct qed_ll2_info *p_ll2_conn)
1006 {
1007 	enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
1008 	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
1009 	struct core_tx_start_ramrod_data *p_ramrod = NULL;
1010 	struct qed_spq_entry *p_ent = NULL;
1011 	struct qed_sp_init_data init_data;
1012 	u16 pq_id = 0, pbl_size;
1013 	int rc = -EINVAL;
1014 
1015 	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
1016 		return 0;
1017 
1018 	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
1019 		p_ll2_conn->tx_stats_en = 0;
1020 	else
1021 		p_ll2_conn->tx_stats_en = 1;
1022 
1023 	/* Get SPQ entry */
1024 	memset(&init_data, 0, sizeof(init_data));
1025 	init_data.cid = p_ll2_conn->cid;
1026 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1027 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1028 
1029 	rc = qed_sp_init_request(p_hwfn, &p_ent,
1030 				 CORE_RAMROD_TX_QUEUE_START,
1031 				 PROTOCOLID_CORE, &init_data);
1032 	if (rc)
1033 		return rc;
1034 
1035 	p_ramrod = &p_ent->ramrod.core_tx_queue_start;
1036 
1037 	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
1038 	p_ramrod->sb_index = p_tx->tx_sb_index;
1039 	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
1040 	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
1041 	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
1042 
1043 	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
1044 		       qed_chain_get_pbl_phys(&p_tx->txq_chain));
1045 	pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
1046 	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
1047 
1048 	switch (p_ll2_conn->input.tx_tc) {
1049 	case PURE_LB_TC:
1050 		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
1051 		break;
1052 	case PKT_LB_TC:
1053 		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
1054 		break;
1055 	default:
1056 		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
1057 		break;
1058 	}
1059 
1060 	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
1061 
1062 	switch (conn_type) {
1063 	case QED_LL2_TYPE_FCOE:
1064 		p_ramrod->conn_type = PROTOCOLID_FCOE;
1065 		break;
1066 	case QED_LL2_TYPE_ISCSI:
1067 		p_ramrod->conn_type = PROTOCOLID_ISCSI;
1068 		break;
1069 	case QED_LL2_TYPE_ROCE:
1070 		p_ramrod->conn_type = PROTOCOLID_ROCE;
1071 		break;
1072 	case QED_LL2_TYPE_IWARP:
1073 		p_ramrod->conn_type = PROTOCOLID_IWARP;
1074 		break;
1075 	case QED_LL2_TYPE_OOO:
1076 		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
1077 			p_ramrod->conn_type = PROTOCOLID_ISCSI;
1078 		else
1079 			p_ramrod->conn_type = PROTOCOLID_IWARP;
1080 		break;
1081 	default:
1082 		p_ramrod->conn_type = PROTOCOLID_ETH;
1083 		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
1084 	}
1085 
1086 	p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
1087 
1088 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
1089 	if (rc)
1090 		return rc;
1091 
1092 	rc = qed_db_recovery_add(p_hwfn->cdev, p_tx->doorbell_addr,
1093 				 &p_tx->db_msg, DB_REC_WIDTH_32B,
1094 				 DB_REC_KERNEL);
1095 	return rc;
1096 }
1097 
1098 static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
1099 				    struct qed_ll2_info *p_ll2_conn)
1100 {
1101 	struct core_rx_stop_ramrod_data *p_ramrod = NULL;
1102 	struct qed_spq_entry *p_ent = NULL;
1103 	struct qed_sp_init_data init_data;
1104 	int rc = -EINVAL;
1105 
1106 	/* Get SPQ entry */
1107 	memset(&init_data, 0, sizeof(init_data));
1108 	init_data.cid = p_ll2_conn->cid;
1109 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1110 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1111 
1112 	rc = qed_sp_init_request(p_hwfn, &p_ent,
1113 				 CORE_RAMROD_RX_QUEUE_STOP,
1114 				 PROTOCOLID_CORE, &init_data);
1115 	if (rc)
1116 		return rc;
1117 
1118 	p_ramrod = &p_ent->ramrod.core_rx_queue_stop;
1119 
1120 	p_ramrod->complete_event_flg = 1;
1121 	p_ramrod->queue_id = p_ll2_conn->queue_id;
1122 
1123 	return qed_spq_post(p_hwfn, p_ent, NULL);
1124 }
1125 
1126 static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
1127 				    struct qed_ll2_info *p_ll2_conn)
1128 {
1129 	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
1130 	struct qed_spq_entry *p_ent = NULL;
1131 	struct qed_sp_init_data init_data;
1132 	int rc = -EINVAL;
1133 	qed_db_recovery_del(p_hwfn->cdev, p_tx->doorbell_addr, &p_tx->db_msg);
1134 
1135 	/* Get SPQ entry */
1136 	memset(&init_data, 0, sizeof(init_data));
1137 	init_data.cid = p_ll2_conn->cid;
1138 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1139 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1140 
1141 	rc = qed_sp_init_request(p_hwfn, &p_ent,
1142 				 CORE_RAMROD_TX_QUEUE_STOP,
1143 				 PROTOCOLID_CORE, &init_data);
1144 	if (rc)
1145 		return rc;
1146 
1147 	return qed_spq_post(p_hwfn, p_ent, NULL);
1148 }
1149 
1150 static int
1151 qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
1152 			      struct qed_ll2_info *p_ll2_info)
1153 {
1154 	struct qed_ll2_rx_packet *p_descq;
1155 	u32 capacity;
1156 	int rc = 0;
1157 
1158 	if (!p_ll2_info->input.rx_num_desc)
1159 		goto out;
1160 
1161 	rc = qed_chain_alloc(p_hwfn->cdev,
1162 			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1163 			     QED_CHAIN_MODE_NEXT_PTR,
1164 			     QED_CHAIN_CNT_TYPE_U16,
1165 			     p_ll2_info->input.rx_num_desc,
1166 			     sizeof(struct core_rx_bd),
1167 			     &p_ll2_info->rx_queue.rxq_chain, NULL);
1168 	if (rc) {
1169 		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
1170 		goto out;
1171 	}
1172 
1173 	capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
1174 	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
1175 			  GFP_KERNEL);
1176 	if (!p_descq) {
1177 		rc = -ENOMEM;
1178 		DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
1179 		goto out;
1180 	}
1181 	p_ll2_info->rx_queue.descq_array = p_descq;
1182 
1183 	rc = qed_chain_alloc(p_hwfn->cdev,
1184 			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1185 			     QED_CHAIN_MODE_PBL,
1186 			     QED_CHAIN_CNT_TYPE_U16,
1187 			     p_ll2_info->input.rx_num_desc,
1188 			     sizeof(struct core_rx_fast_path_cqe),
1189 			     &p_ll2_info->rx_queue.rcq_chain, NULL);
1190 	if (rc) {
1191 		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
1192 		goto out;
1193 	}
1194 
1195 	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
1196 		   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
1197 		   p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc);
1198 
1199 out:
1200 	return rc;
1201 }
1202 
1203 static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
1204 					 struct qed_ll2_info *p_ll2_info)
1205 {
1206 	struct qed_ll2_tx_packet *p_descq;
1207 	u32 desc_size;
1208 	u32 capacity;
1209 	int rc = 0;
1210 
1211 	if (!p_ll2_info->input.tx_num_desc)
1212 		goto out;
1213 
1214 	rc = qed_chain_alloc(p_hwfn->cdev,
1215 			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1216 			     QED_CHAIN_MODE_PBL,
1217 			     QED_CHAIN_CNT_TYPE_U16,
1218 			     p_ll2_info->input.tx_num_desc,
1219 			     sizeof(struct core_tx_bd),
1220 			     &p_ll2_info->tx_queue.txq_chain, NULL);
1221 	if (rc)
1222 		goto out;
1223 
1224 	capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
1225 	/* First element is part of the packet, rest are flexibly added */
1226 	desc_size = (sizeof(*p_descq) +
1227 		     (p_ll2_info->input.tx_max_bds_per_packet - 1) *
1228 		     sizeof(p_descq->bds_set));
1229 
1230 	p_descq = kcalloc(capacity, desc_size, GFP_KERNEL);
1231 	if (!p_descq) {
1232 		rc = -ENOMEM;
1233 		goto out;
1234 	}
1235 	p_ll2_info->tx_queue.descq_mem = p_descq;
1236 
1237 	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
1238 		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
1239 		   p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc);
1240 
1241 out:
1242 	if (rc)
1243 		DP_NOTICE(p_hwfn,
1244 			  "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
1245 			  p_ll2_info->input.tx_num_desc);
1246 	return rc;
1247 }
1248 
1249 static int
1250 qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
1251 			       struct qed_ll2_info *p_ll2_info, u16 mtu)
1252 {
1253 	struct qed_ooo_buffer *p_buf = NULL;
1254 	void *p_virt;
1255 	u16 buf_idx;
1256 	int rc = 0;
1257 
1258 	if (p_ll2_info->input.conn_type != QED_LL2_TYPE_OOO)
1259 		return rc;
1260 
1261 	/* Correct number of requested OOO buffers if needed */
1262 	if (!p_ll2_info->input.rx_num_ooo_buffers) {
1263 		u16 num_desc = p_ll2_info->input.rx_num_desc;
1264 
1265 		if (!num_desc)
1266 			return -EINVAL;
1267 		p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2;
1268 	}
1269 
1270 	for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers;
1271 	     buf_idx++) {
1272 		p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
1273 		if (!p_buf) {
1274 			rc = -ENOMEM;
1275 			goto out;
1276 		}
1277 
1278 		p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
1279 		p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
1280 					 ETH_CACHE_LINE_SIZE - 1) &
1281 					~(ETH_CACHE_LINE_SIZE - 1);
1282 		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1283 					    p_buf->rx_buffer_size,
1284 					    &p_buf->rx_buffer_phys_addr,
1285 					    GFP_KERNEL);
1286 		if (!p_virt) {
1287 			kfree(p_buf);
1288 			rc = -ENOMEM;
1289 			goto out;
1290 		}
1291 
1292 		p_buf->rx_buffer_virt_addr = p_virt;
1293 		qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
1294 	}
1295 
1296 	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
1297 		   "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
1298 		   p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size);
1299 
1300 out:
1301 	return rc;
1302 }
1303 
1304 static int
1305 qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
1306 {
1307 	if (!cbs || (!cbs->rx_comp_cb ||
1308 		     !cbs->rx_release_cb ||
1309 		     !cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie))
1310 		return -EINVAL;
1311 
1312 	p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb;
1313 	p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
1314 	p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
1315 	p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
1316 	p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb;
1317 	p_ll2_info->cbs.cookie = cbs->cookie;
1318 
1319 	return 0;
1320 }
1321 
1322 static void _qed_ll2_calc_allowed_conns(struct qed_hwfn *p_hwfn,
1323 					struct qed_ll2_acquire_data *data,
1324 					u8 *start_idx, u8 *last_idx)
1325 {
1326 	/* LL2 queues handles will be split as follows:
1327 	 * First will be the legacy queues, and then the ctx based.
1328 	 */
1329 	if (data->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
1330 		*start_idx = QED_LL2_LEGACY_CONN_BASE_PF;
1331 		*last_idx = *start_idx +
1332 			QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF;
1333 	} else {
1334 		/* QED_LL2_RX_TYPE_CTX */
1335 		*start_idx = QED_LL2_CTX_CONN_BASE_PF;
1336 		*last_idx = *start_idx +
1337 			QED_MAX_NUM_OF_CTX_LL2_CONNS_PF;
1338 	}
1339 }
1340 
1341 static enum core_error_handle
1342 qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
1343 {
1344 	switch (err) {
1345 	case QED_LL2_DROP_PACKET:
1346 		return LL2_DROP_PACKET;
1347 	case QED_LL2_DO_NOTHING:
1348 		return LL2_DO_NOTHING;
1349 	case QED_LL2_ASSERT:
1350 		return LL2_ASSERT;
1351 	default:
1352 		return LL2_DO_NOTHING;
1353 	}
1354 }
1355 
1356 int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
1357 {
1358 	struct qed_hwfn *p_hwfn = cxt;
1359 	qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
1360 	struct qed_ll2_info *p_ll2_info = NULL;
1361 	u8 i, first_idx, last_idx, *p_tx_max;
1362 	int rc;
1363 
1364 	if (!data->p_connection_handle || !p_hwfn->p_ll2_info)
1365 		return -EINVAL;
1366 
1367 	_qed_ll2_calc_allowed_conns(p_hwfn, data, &first_idx, &last_idx);
1368 
1369 	/* Find a free connection to be used */
1370 	for (i = first_idx; i < last_idx; i++) {
1371 		mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
1372 		if (p_hwfn->p_ll2_info[i].b_active) {
1373 			mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
1374 			continue;
1375 		}
1376 
1377 		p_hwfn->p_ll2_info[i].b_active = true;
1378 		p_ll2_info = &p_hwfn->p_ll2_info[i];
1379 		mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
1380 		break;
1381 	}
1382 	if (!p_ll2_info)
1383 		return -EBUSY;
1384 
1385 	memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));
1386 
1387 	switch (data->input.tx_dest) {
1388 	case QED_LL2_TX_DEST_NW:
1389 		p_ll2_info->tx_dest = CORE_TX_DEST_NW;
1390 		break;
1391 	case QED_LL2_TX_DEST_LB:
1392 		p_ll2_info->tx_dest = CORE_TX_DEST_LB;
1393 		break;
1394 	case QED_LL2_TX_DEST_DROP:
1395 		p_ll2_info->tx_dest = CORE_TX_DEST_DROP;
1396 		break;
1397 	default:
1398 		return -EINVAL;
1399 	}
1400 
1401 	if (data->input.conn_type == QED_LL2_TYPE_OOO ||
1402 	    data->input.secondary_queue)
1403 		p_ll2_info->main_func_queue = false;
1404 	else
1405 		p_ll2_info->main_func_queue = true;
1406 
1407 	/* Correct maximum number of Tx BDs */
1408 	p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet;
1409 	if (*p_tx_max == 0)
1410 		*p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET;
1411 	else
1412 		*p_tx_max = min_t(u8, *p_tx_max,
1413 				  CORE_LL2_TX_MAX_BDS_PER_PACKET);
1414 
1415 	rc = qed_ll2_set_cbs(p_ll2_info, data->cbs);
1416 	if (rc) {
1417 		DP_NOTICE(p_hwfn, "Invalid callback functions\n");
1418 		goto q_allocate_fail;
1419 	}
1420 
1421 	rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info);
1422 	if (rc)
1423 		goto q_allocate_fail;
1424 
1425 	rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info);
1426 	if (rc)
1427 		goto q_allocate_fail;
1428 
1429 	rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
1430 					    data->input.mtu);
1431 	if (rc)
1432 		goto q_allocate_fail;
1433 
1434 	/* Register callbacks for the Rx/Tx queues */
1435 	if (data->input.conn_type == QED_LL2_TYPE_OOO) {
1436 		comp_rx_cb = qed_ll2_lb_rxq_completion;
1437 		comp_tx_cb = qed_ll2_lb_txq_completion;
1438 	} else {
1439 		comp_rx_cb = qed_ll2_rxq_completion;
1440 		comp_tx_cb = qed_ll2_txq_completion;
1441 	}
1442 
1443 	if (data->input.rx_num_desc) {
1444 		qed_int_register_cb(p_hwfn, comp_rx_cb,
1445 				    &p_hwfn->p_ll2_info[i],
1446 				    &p_ll2_info->rx_queue.rx_sb_index,
1447 				    &p_ll2_info->rx_queue.p_fw_cons);
1448 		p_ll2_info->rx_queue.b_cb_registered = true;
1449 	}
1450 
1451 	if (data->input.tx_num_desc) {
1452 		qed_int_register_cb(p_hwfn,
1453 				    comp_tx_cb,
1454 				    &p_hwfn->p_ll2_info[i],
1455 				    &p_ll2_info->tx_queue.tx_sb_index,
1456 				    &p_ll2_info->tx_queue.p_fw_cons);
1457 		p_ll2_info->tx_queue.b_cb_registered = true;
1458 	}
1459 
1460 	*data->p_connection_handle = i;
1461 	return rc;
1462 
1463 q_allocate_fail:
1464 	qed_ll2_release_connection(p_hwfn, i);
1465 	return -ENOMEM;
1466 }
1467 
1468 static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
1469 					   struct qed_ll2_info *p_ll2_conn)
1470 {
1471 	enum qed_ll2_error_handle error_input;
1472 	enum core_error_handle error_mode;
1473 	u8 action_on_error = 0;
1474 	int rc;
1475 
1476 	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
1477 		return 0;
1478 
1479 	DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
1480 	error_input = p_ll2_conn->input.ai_err_packet_too_big;
1481 	error_mode = qed_ll2_get_error_choice(error_input);
1482 	SET_FIELD(action_on_error,
1483 		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode);
1484 	error_input = p_ll2_conn->input.ai_err_no_buf;
1485 	error_mode = qed_ll2_get_error_choice(error_input);
1486 	SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode);
1487 
1488 	rc = qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
1489 	if (rc)
1490 		return rc;
1491 
1492 	if (p_ll2_conn->rx_queue.ctx_based) {
1493 		rc = qed_db_recovery_add(p_hwfn->cdev,
1494 					 p_ll2_conn->rx_queue.set_prod_addr,
1495 					 &p_ll2_conn->rx_queue.db_data,
1496 					 DB_REC_WIDTH_64B, DB_REC_KERNEL);
1497 	}
1498 
1499 	return rc;
1500 }
1501 
1502 static void
1503 qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
1504 				 struct qed_ll2_info *p_ll2_conn)
1505 {
1506 	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
1507 		return;
1508 
1509 	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
1510 	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
1511 }
1512 
1513 static inline u8 qed_ll2_handle_to_queue_id(struct qed_hwfn *p_hwfn,
1514 					    u8 handle,
1515 					    u8 ll2_queue_type)
1516 {
1517 	u8 qid;
1518 
1519 	if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY)
1520 		return p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] + handle;
1521 
1522 	/* QED_LL2_RX_TYPE_CTX
1523 	 * FW distinguishes between the legacy queues (ram based) and the
1524 	 * ctx based queues by the queue_id.
1525 	 * The first MAX_NUM_LL2_RX_RAM_QUEUES queues are legacy
1526 	 * and the queue ids above that are ctx base.
1527 	 */
1528 	qid = p_hwfn->hw_info.resc_start[QED_LL2_CTX_QUEUE] +
1529 	      MAX_NUM_LL2_RX_RAM_QUEUES;
1530 
1531 	/* See comment on the acquire connection for how the ll2
1532 	 * queues handles are divided.
1533 	 */
1534 	qid += (handle - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF);
1535 
1536 	return qid;
1537 }
1538 
1539 int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
1540 {
1541 	struct e4_core_conn_context *p_cxt;
1542 	struct qed_ll2_tx_packet *p_pkt;
1543 	struct qed_ll2_info *p_ll2_conn;
1544 	struct qed_hwfn *p_hwfn = cxt;
1545 	struct qed_ll2_rx_queue *p_rx;
1546 	struct qed_ll2_tx_queue *p_tx;
1547 	struct qed_cxt_info cxt_info;
1548 	struct qed_ptt *p_ptt;
1549 	int rc = -EINVAL;
1550 	u32 i, capacity;
1551 	u32 desc_size;
1552 	u8 qid;
1553 
1554 	p_ptt = qed_ptt_acquire(p_hwfn);
1555 	if (!p_ptt)
1556 		return -EAGAIN;
1557 
1558 	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
1559 	if (!p_ll2_conn) {
1560 		rc = -EINVAL;
1561 		goto out;
1562 	}
1563 
1564 	p_rx = &p_ll2_conn->rx_queue;
1565 	p_tx = &p_ll2_conn->tx_queue;
1566 
1567 	qed_chain_reset(&p_rx->rxq_chain);
1568 	qed_chain_reset(&p_rx->rcq_chain);
1569 	INIT_LIST_HEAD(&p_rx->active_descq);
1570 	INIT_LIST_HEAD(&p_rx->free_descq);
1571 	INIT_LIST_HEAD(&p_rx->posting_descq);
1572 	spin_lock_init(&p_rx->lock);
1573 	capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
1574 	for (i = 0; i < capacity; i++)
1575 		list_add_tail(&p_rx->descq_array[i].list_entry,
1576 			      &p_rx->free_descq);
1577 	*p_rx->p_fw_cons = 0;
1578 
1579 	qed_chain_reset(&p_tx->txq_chain);
1580 	INIT_LIST_HEAD(&p_tx->active_descq);
1581 	INIT_LIST_HEAD(&p_tx->free_descq);
1582 	INIT_LIST_HEAD(&p_tx->sending_descq);
1583 	spin_lock_init(&p_tx->lock);
1584 	capacity = qed_chain_get_capacity(&p_tx->txq_chain);
1585 	/* First element is part of the packet, rest are flexibly added */
1586 	desc_size = (sizeof(*p_pkt) +
1587 		     (p_ll2_conn->input.tx_max_bds_per_packet - 1) *
1588 		     sizeof(p_pkt->bds_set));
1589 
1590 	for (i = 0; i < capacity; i++) {
1591 		p_pkt = p_tx->descq_mem + desc_size * i;
1592 		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
1593 	}
1594 	p_tx->cur_completing_bd_idx = 0;
1595 	p_tx->bds_idx = 0;
1596 	p_tx->b_completing_packet = false;
1597 	p_tx->cur_send_packet = NULL;
1598 	p_tx->cur_send_frag_num = 0;
1599 	p_tx->cur_completing_frag_num = 0;
1600 	*p_tx->p_fw_cons = 0;
1601 
1602 	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
1603 	if (rc)
1604 		goto out;
1605 	cxt_info.iid = p_ll2_conn->cid;
1606 	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
1607 	if (rc) {
1608 		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
1609 			  p_ll2_conn->cid);
1610 		goto out;
1611 	}
1612 
1613 	p_cxt = cxt_info.p_cxt;
1614 
1615 	memset(p_cxt, 0, sizeof(*p_cxt));
1616 
1617 	qid = qed_ll2_handle_to_queue_id(p_hwfn, connection_handle,
1618 					 p_ll2_conn->input.rx_conn_type);
1619 	p_ll2_conn->queue_id = qid;
1620 	p_ll2_conn->tx_stats_id = qid;
1621 
1622 	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
1623 		   "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d\n",
1624 		   p_hwfn->rel_pf_id, p_ll2_conn->input.rx_conn_type, qid);
1625 
1626 	if (p_ll2_conn->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
1627 		p_rx->set_prod_addr = p_hwfn->regview +
1628 		    GTT_BAR0_MAP_REG_TSDM_RAM + TSTORM_LL2_RX_PRODS_OFFSET(qid);
1629 	} else {
1630 		/* QED_LL2_RX_TYPE_CTX - using doorbell */
1631 		p_rx->ctx_based = 1;
1632 
1633 		p_rx->set_prod_addr = p_hwfn->doorbells +
1634 			p_hwfn->dpi_start_offset +
1635 			DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE);
1636 
1637 		/* prepare db data */
1638 		p_rx->db_data.icid = cpu_to_le16((u16)p_ll2_conn->cid);
1639 		SET_FIELD(p_rx->db_data.params,
1640 			  CORE_PWM_PROD_UPDATE_DATA_AGG_CMD, DB_AGG_CMD_SET);
1641 		SET_FIELD(p_rx->db_data.params,
1642 			  CORE_PWM_PROD_UPDATE_DATA_RESERVED1, 0);
1643 	}
1644 
1645 	p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
1646 					    qed_db_addr(p_ll2_conn->cid,
1647 							DQ_DEMS_LEGACY);
1648 	/* prepare db data */
1649 	SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
1650 	SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
1651 	SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
1652 		  DQ_XCM_CORE_TX_BD_PROD_CMD);
1653 	p_tx->db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
1654 
1655 	rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
1656 	if (rc)
1657 		goto out;
1658 
1659 	rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
1660 	if (rc)
1661 		goto out;
1662 
1663 	if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
1664 		qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);
1665 
1666 	qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
1667 
1668 	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
1669 		if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
1670 			qed_llh_add_protocol_filter(p_hwfn->cdev, 0,
1671 						    QED_LLH_FILTER_ETHERTYPE,
1672 						    ETH_P_FCOE, 0);
1673 		qed_llh_add_protocol_filter(p_hwfn->cdev, 0,
1674 					    QED_LLH_FILTER_ETHERTYPE,
1675 					    ETH_P_FIP, 0);
1676 	}
1677 
1678 out:
1679 	qed_ptt_release(p_hwfn, p_ptt);
1680 	return rc;
1681 }
1682 
1683 static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
1684 					     struct qed_ll2_rx_queue *p_rx,
1685 					     struct qed_ll2_rx_packet *p_curp)
1686 {
1687 	struct qed_ll2_rx_packet *p_posting_packet = NULL;
1688 	struct core_ll2_rx_prod rx_prod = { 0, 0 };
1689 	bool b_notify_fw = false;
1690 	u16 bd_prod, cq_prod;
1691 
1692 	/* This handles the flushing of already posted buffers */
1693 	while (!list_empty(&p_rx->posting_descq)) {
1694 		p_posting_packet = list_first_entry(&p_rx->posting_descq,
1695 						    struct qed_ll2_rx_packet,
1696 						    list_entry);
1697 		list_move_tail(&p_posting_packet->list_entry,
1698 			       &p_rx->active_descq);
1699 		b_notify_fw = true;
1700 	}
1701 
1702 	/* This handles the supplied packet [if there is one] */
1703 	if (p_curp) {
1704 		list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
1705 		b_notify_fw = true;
1706 	}
1707 
1708 	if (!b_notify_fw)
1709 		return;
1710 
1711 	bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
1712 	cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
1713 	if (p_rx->ctx_based) {
1714 		/* update producer by giving a doorbell */
1715 		p_rx->db_data.prod.bd_prod = cpu_to_le16(bd_prod);
1716 		p_rx->db_data.prod.cqe_prod = cpu_to_le16(cq_prod);
1717 		/* Make sure chain element is updated before ringing the
1718 		 * doorbell
1719 		 */
1720 		dma_wmb();
1721 		DIRECT_REG_WR64(p_rx->set_prod_addr,
1722 				*((u64 *)&p_rx->db_data));
1723 	} else {
1724 		rx_prod.bd_prod = cpu_to_le16(bd_prod);
1725 		rx_prod.cqe_prod = cpu_to_le16(cq_prod);
1726 
1727 		/* Make sure chain element is updated before ringing the
1728 		 * doorbell
1729 		 */
1730 		dma_wmb();
1731 
1732 		DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
1733 	}
1734 }
1735 
1736 int qed_ll2_post_rx_buffer(void *cxt,
1737 			   u8 connection_handle,
1738 			   dma_addr_t addr,
1739 			   u16 buf_len, void *cookie, u8 notify_fw)
1740 {
1741 	struct qed_hwfn *p_hwfn = cxt;
1742 	struct core_rx_bd_with_buff_len *p_curb = NULL;
1743 	struct qed_ll2_rx_packet *p_curp = NULL;
1744 	struct qed_ll2_info *p_ll2_conn;
1745 	struct qed_ll2_rx_queue *p_rx;
1746 	unsigned long flags;
1747 	void *p_data;
1748 	int rc = 0;
1749 
1750 	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1751 	if (!p_ll2_conn)
1752 		return -EINVAL;
1753 	p_rx = &p_ll2_conn->rx_queue;
1754 
1755 	spin_lock_irqsave(&p_rx->lock, flags);
1756 	if (!list_empty(&p_rx->free_descq))
1757 		p_curp = list_first_entry(&p_rx->free_descq,
1758 					  struct qed_ll2_rx_packet, list_entry);
1759 	if (p_curp) {
1760 		if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
1761 		    qed_chain_get_elem_left(&p_rx->rcq_chain)) {
1762 			p_data = qed_chain_produce(&p_rx->rxq_chain);
1763 			p_curb = (struct core_rx_bd_with_buff_len *)p_data;
1764 			qed_chain_produce(&p_rx->rcq_chain);
1765 		}
1766 	}
1767 
1768 	/* If we're lacking entires, let's try to flush buffers to FW */
1769 	if (!p_curp || !p_curb) {
1770 		rc = -EBUSY;
1771 		p_curp = NULL;
1772 		goto out_notify;
1773 	}
1774 
1775 	/* We have an Rx packet we can fill */
1776 	DMA_REGPAIR_LE(p_curb->addr, addr);
1777 	p_curb->buff_length = cpu_to_le16(buf_len);
1778 	p_curp->rx_buf_addr = addr;
1779 	p_curp->cookie = cookie;
1780 	p_curp->rxq_bd = p_curb;
1781 	p_curp->buf_length = buf_len;
1782 	list_del(&p_curp->list_entry);
1783 
1784 	/* Check if we only want to enqueue this packet without informing FW */
1785 	if (!notify_fw) {
1786 		list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
1787 		goto out;
1788 	}
1789 
1790 out_notify:
1791 	qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
1792 out:
1793 	spin_unlock_irqrestore(&p_rx->lock, flags);
1794 	return rc;
1795 }
1796 
1797 static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
1798 					  struct qed_ll2_tx_queue *p_tx,
1799 					  struct qed_ll2_tx_packet *p_curp,
1800 					  struct qed_ll2_tx_pkt_info *pkt,
1801 					  u8 notify_fw)
1802 {
1803 	list_del(&p_curp->list_entry);
1804 	p_curp->cookie = pkt->cookie;
1805 	p_curp->bd_used = pkt->num_of_bds;
1806 	p_curp->notify_fw = notify_fw;
1807 	p_tx->cur_send_packet = p_curp;
1808 	p_tx->cur_send_frag_num = 0;
1809 
1810 	p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag;
1811 	p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len;
1812 	p_tx->cur_send_frag_num++;
1813 }
1814 
1815 static void
1816 qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
1817 				 struct qed_ll2_info *p_ll2,
1818 				 struct qed_ll2_tx_packet *p_curp,
1819 				 struct qed_ll2_tx_pkt_info *pkt)
1820 {
1821 	struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
1822 	u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
1823 	struct core_tx_bd *start_bd = NULL;
1824 	enum core_roce_flavor_type roce_flavor;
1825 	enum core_tx_dest tx_dest;
1826 	u16 bd_data = 0, frag_idx;
1827 
1828 	roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
1829 							     : CORE_RROCE;
1830 
1831 	switch (pkt->tx_dest) {
1832 	case QED_LL2_TX_DEST_NW:
1833 		tx_dest = CORE_TX_DEST_NW;
1834 		break;
1835 	case QED_LL2_TX_DEST_LB:
1836 		tx_dest = CORE_TX_DEST_LB;
1837 		break;
1838 	case QED_LL2_TX_DEST_DROP:
1839 		tx_dest = CORE_TX_DEST_DROP;
1840 		break;
1841 	default:
1842 		tx_dest = CORE_TX_DEST_LB;
1843 		break;
1844 	}
1845 
1846 	start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
1847 	if (QED_IS_IWARP_PERSONALITY(p_hwfn) &&
1848 	    p_ll2->input.conn_type == QED_LL2_TYPE_OOO) {
1849 		start_bd->nw_vlan_or_lb_echo =
1850 		    cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE);
1851 	} else {
1852 		start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
1853 		if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
1854 		    p_ll2->input.conn_type == QED_LL2_TYPE_FCOE)
1855 			pkt->remove_stag = true;
1856 	}
1857 
1858 	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
1859 		  cpu_to_le16(pkt->l4_hdr_offset_w));
1860 	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
1861 	bd_data |= pkt->bd_flags;
1862 	SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
1863 	SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
1864 	SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
1865 	SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum));
1866 	SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum));
1867 	SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len));
1868 	SET_FIELD(bd_data, CORE_TX_BD_DATA_DISABLE_STAG_INSERTION,
1869 		  !!(pkt->remove_stag));
1870 
1871 	start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
1872 	DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
1873 	start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);
1874 
1875 	DP_VERBOSE(p_hwfn,
1876 		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1877 		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
1878 		   p_ll2->queue_id,
1879 		   p_ll2->cid,
1880 		   p_ll2->input.conn_type,
1881 		   prod_idx,
1882 		   pkt->first_frag_len,
1883 		   pkt->num_of_bds,
1884 		   le32_to_cpu(start_bd->addr.hi),
1885 		   le32_to_cpu(start_bd->addr.lo));
1886 
1887 	if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds)
1888 		return;
1889 
1890 	/* Need to provide the packet with additional BDs for frags */
1891 	for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
1892 	     frag_idx < pkt->num_of_bds; frag_idx++) {
1893 		struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
1894 
1895 		*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
1896 		(*p_bd)->bd_data.as_bitfield = 0;
1897 		(*p_bd)->bitfield1 = 0;
1898 		p_curp->bds_set[frag_idx].tx_frag = 0;
1899 		p_curp->bds_set[frag_idx].frag_len = 0;
1900 	}
1901 }
1902 
1903 /* This should be called while the Txq spinlock is being held */
1904 static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
1905 				     struct qed_ll2_info *p_ll2_conn)
1906 {
1907 	bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
1908 	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
1909 	struct qed_ll2_tx_packet *p_pkt = NULL;
1910 	u16 bd_prod;
1911 
1912 	/* If there are missing BDs, don't do anything now */
1913 	if (p_ll2_conn->tx_queue.cur_send_frag_num !=
1914 	    p_ll2_conn->tx_queue.cur_send_packet->bd_used)
1915 		return;
1916 
1917 	/* Push the current packet to the list and clean after it */
1918 	list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
1919 		      &p_ll2_conn->tx_queue.sending_descq);
1920 	p_ll2_conn->tx_queue.cur_send_packet = NULL;
1921 	p_ll2_conn->tx_queue.cur_send_frag_num = 0;
1922 
1923 	/* Notify FW of packet only if requested to */
1924 	if (!b_notify)
1925 		return;
1926 
1927 	bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);
1928 
1929 	while (!list_empty(&p_tx->sending_descq)) {
1930 		p_pkt = list_first_entry(&p_tx->sending_descq,
1931 					 struct qed_ll2_tx_packet, list_entry);
1932 		if (!p_pkt)
1933 			break;
1934 
1935 		list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
1936 	}
1937 
1938 	p_tx->db_msg.spq_prod = cpu_to_le16(bd_prod);
1939 
1940 	/* Make sure the BDs data is updated before ringing the doorbell */
1941 	wmb();
1942 
1943 	DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&p_tx->db_msg));
1944 
1945 	DP_VERBOSE(p_hwfn,
1946 		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1947 		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
1948 		   p_ll2_conn->queue_id,
1949 		   p_ll2_conn->cid,
1950 		   p_ll2_conn->input.conn_type, p_tx->db_msg.spq_prod);
1951 }
1952 
1953 int qed_ll2_prepare_tx_packet(void *cxt,
1954 			      u8 connection_handle,
1955 			      struct qed_ll2_tx_pkt_info *pkt,
1956 			      bool notify_fw)
1957 {
1958 	struct qed_hwfn *p_hwfn = cxt;
1959 	struct qed_ll2_tx_packet *p_curp = NULL;
1960 	struct qed_ll2_info *p_ll2_conn = NULL;
1961 	struct qed_ll2_tx_queue *p_tx;
1962 	struct qed_chain *p_tx_chain;
1963 	unsigned long flags;
1964 	int rc = 0;
1965 
1966 	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1967 	if (!p_ll2_conn)
1968 		return -EINVAL;
1969 	p_tx = &p_ll2_conn->tx_queue;
1970 	p_tx_chain = &p_tx->txq_chain;
1971 
1972 	if (pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet)
1973 		return -EIO;
1974 
1975 	spin_lock_irqsave(&p_tx->lock, flags);
1976 	if (p_tx->cur_send_packet) {
1977 		rc = -EEXIST;
1978 		goto out;
1979 	}
1980 
1981 	/* Get entry, but only if we have tx elements for it */
1982 	if (!list_empty(&p_tx->free_descq))
1983 		p_curp = list_first_entry(&p_tx->free_descq,
1984 					  struct qed_ll2_tx_packet, list_entry);
1985 	if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)
1986 		p_curp = NULL;
1987 
1988 	if (!p_curp) {
1989 		rc = -EBUSY;
1990 		goto out;
1991 	}
1992 
1993 	/* Prepare packet and BD, and perhaps send a doorbell to FW */
1994 	qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw);
1995 
1996 	qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt);
1997 
1998 	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
1999 
2000 out:
2001 	spin_unlock_irqrestore(&p_tx->lock, flags);
2002 	return rc;
2003 }
2004 
2005 int qed_ll2_set_fragment_of_tx_packet(void *cxt,
2006 				      u8 connection_handle,
2007 				      dma_addr_t addr, u16 nbytes)
2008 {
2009 	struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
2010 	struct qed_hwfn *p_hwfn = cxt;
2011 	struct qed_ll2_info *p_ll2_conn = NULL;
2012 	u16 cur_send_frag_num = 0;
2013 	struct core_tx_bd *p_bd;
2014 	unsigned long flags;
2015 
2016 	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
2017 	if (!p_ll2_conn)
2018 		return -EINVAL;
2019 
2020 	if (!p_ll2_conn->tx_queue.cur_send_packet)
2021 		return -EINVAL;
2022 
2023 	p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
2024 	cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
2025 
2026 	if (cur_send_frag_num >= p_cur_send_packet->bd_used)
2027 		return -EINVAL;
2028 
2029 	/* Fill the BD information, and possibly notify FW */
2030 	p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
2031 	DMA_REGPAIR_LE(p_bd->addr, addr);
2032 	p_bd->nbytes = cpu_to_le16(nbytes);
2033 	p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
2034 	p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
2035 
2036 	p_ll2_conn->tx_queue.cur_send_frag_num++;
2037 
2038 	spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
2039 	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
2040 	spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);
2041 
2042 	return 0;
2043 }
2044 
2045 int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
2046 {
2047 	struct qed_hwfn *p_hwfn = cxt;
2048 	struct qed_ll2_info *p_ll2_conn = NULL;
2049 	int rc = -EINVAL;
2050 	struct qed_ptt *p_ptt;
2051 
2052 	p_ptt = qed_ptt_acquire(p_hwfn);
2053 	if (!p_ptt)
2054 		return -EAGAIN;
2055 
2056 	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
2057 	if (!p_ll2_conn) {
2058 		rc = -EINVAL;
2059 		goto out;
2060 	}
2061 
2062 	/* Stop Tx & Rx of connection, if needed */
2063 	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
2064 		p_ll2_conn->tx_queue.b_cb_registered = false;
2065 		smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
2066 		rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
2067 		if (rc)
2068 			goto out;
2069 
2070 		qed_ll2_txq_flush(p_hwfn, connection_handle);
2071 		qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
2072 	}
2073 
2074 	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
2075 		p_ll2_conn->rx_queue.b_cb_registered = false;
2076 		smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
2077 
2078 		if (p_ll2_conn->rx_queue.ctx_based)
2079 			qed_db_recovery_del(p_hwfn->cdev,
2080 					    p_ll2_conn->rx_queue.set_prod_addr,
2081 					    &p_ll2_conn->rx_queue.db_data);
2082 
2083 		rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
2084 		if (rc)
2085 			goto out;
2086 
2087 		qed_ll2_rxq_flush(p_hwfn, connection_handle);
2088 		qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
2089 	}
2090 
2091 	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
2092 		qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
2093 
2094 	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
2095 		if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
2096 			qed_llh_remove_protocol_filter(p_hwfn->cdev, 0,
2097 						       QED_LLH_FILTER_ETHERTYPE,
2098 						       ETH_P_FCOE, 0);
2099 		qed_llh_remove_protocol_filter(p_hwfn->cdev, 0,
2100 					       QED_LLH_FILTER_ETHERTYPE,
2101 					       ETH_P_FIP, 0);
2102 	}
2103 
2104 out:
2105 	qed_ptt_release(p_hwfn, p_ptt);
2106 	return rc;
2107 }
2108 
2109 static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
2110 					   struct qed_ll2_info *p_ll2_conn)
2111 {
2112 	struct qed_ooo_buffer *p_buffer;
2113 
2114 	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
2115 		return;
2116 
2117 	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
2118 	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
2119 						   p_hwfn->p_ooo_info))) {
2120 		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
2121 				  p_buffer->rx_buffer_size,
2122 				  p_buffer->rx_buffer_virt_addr,
2123 				  p_buffer->rx_buffer_phys_addr);
2124 		kfree(p_buffer);
2125 	}
2126 }
2127 
2128 void qed_ll2_release_connection(void *cxt, u8 connection_handle)
2129 {
2130 	struct qed_hwfn *p_hwfn = cxt;
2131 	struct qed_ll2_info *p_ll2_conn = NULL;
2132 
2133 	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
2134 	if (!p_ll2_conn)
2135 		return;
2136 
2137 	kfree(p_ll2_conn->tx_queue.descq_mem);
2138 	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
2139 
2140 	kfree(p_ll2_conn->rx_queue.descq_array);
2141 	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
2142 	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);
2143 
2144 	qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
2145 
2146 	qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);
2147 
2148 	mutex_lock(&p_ll2_conn->mutex);
2149 	p_ll2_conn->b_active = false;
2150 	mutex_unlock(&p_ll2_conn->mutex);
2151 }
2152 
2153 int qed_ll2_alloc(struct qed_hwfn *p_hwfn)
2154 {
2155 	struct qed_ll2_info *p_ll2_connections;
2156 	u8 i;
2157 
2158 	/* Allocate LL2's set struct */
2159 	p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
2160 				    sizeof(struct qed_ll2_info), GFP_KERNEL);
2161 	if (!p_ll2_connections) {
2162 		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
2163 		return -ENOMEM;
2164 	}
2165 
2166 	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
2167 		p_ll2_connections[i].my_id = i;
2168 
2169 	p_hwfn->p_ll2_info = p_ll2_connections;
2170 	return 0;
2171 }
2172 
2173 void qed_ll2_setup(struct qed_hwfn *p_hwfn)
2174 {
2175 	int i;
2176 
2177 	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
2178 		mutex_init(&p_hwfn->p_ll2_info[i].mutex);
2179 }
2180 
2181 void qed_ll2_free(struct qed_hwfn *p_hwfn)
2182 {
2183 	if (!p_hwfn->p_ll2_info)
2184 		return;
2185 
2186 	kfree(p_hwfn->p_ll2_info);
2187 	p_hwfn->p_ll2_info = NULL;
2188 }
2189 
2190 static void _qed_ll2_get_port_stats(struct qed_hwfn *p_hwfn,
2191 				    struct qed_ptt *p_ptt,
2192 				    struct qed_ll2_stats *p_stats)
2193 {
2194 	struct core_ll2_port_stats port_stats;
2195 
2196 	memset(&port_stats, 0, sizeof(port_stats));
2197 	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
2198 			BAR0_MAP_REG_TSDM_RAM +
2199 			TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)),
2200 			sizeof(port_stats));
2201 
2202 	p_stats->gsi_invalid_hdr += HILO_64_REGPAIR(port_stats.gsi_invalid_hdr);
2203 	p_stats->gsi_invalid_pkt_length +=
2204 	    HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length);
2205 	p_stats->gsi_unsupported_pkt_typ +=
2206 	    HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ);
2207 	p_stats->gsi_crcchksm_error +=
2208 	    HILO_64_REGPAIR(port_stats.gsi_crcchksm_error);
2209 }
2210 
2211 static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
2212 				struct qed_ptt *p_ptt,
2213 				struct qed_ll2_info *p_ll2_conn,
2214 				struct qed_ll2_stats *p_stats)
2215 {
2216 	struct core_ll2_tstorm_per_queue_stat tstats;
2217 	u8 qid = p_ll2_conn->queue_id;
2218 	u32 tstats_addr;
2219 
2220 	memset(&tstats, 0, sizeof(tstats));
2221 	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
2222 		      CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
2223 	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
2224 
2225 	p_stats->packet_too_big_discard +=
2226 			HILO_64_REGPAIR(tstats.packet_too_big_discard);
2227 	p_stats->no_buff_discard += HILO_64_REGPAIR(tstats.no_buff_discard);
2228 }
2229 
2230 static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
2231 				struct qed_ptt *p_ptt,
2232 				struct qed_ll2_info *p_ll2_conn,
2233 				struct qed_ll2_stats *p_stats)
2234 {
2235 	struct core_ll2_ustorm_per_queue_stat ustats;
2236 	u8 qid = p_ll2_conn->queue_id;
2237 	u32 ustats_addr;
2238 
2239 	memset(&ustats, 0, sizeof(ustats));
2240 	ustats_addr = BAR0_MAP_REG_USDM_RAM +
2241 		      CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
2242 	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
2243 
2244 	p_stats->rcv_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
2245 	p_stats->rcv_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
2246 	p_stats->rcv_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
2247 	p_stats->rcv_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
2248 	p_stats->rcv_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
2249 	p_stats->rcv_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
2250 }
2251 
2252 static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
2253 				struct qed_ptt *p_ptt,
2254 				struct qed_ll2_info *p_ll2_conn,
2255 				struct qed_ll2_stats *p_stats)
2256 {
2257 	struct core_ll2_pstorm_per_queue_stat pstats;
2258 	u8 stats_id = p_ll2_conn->tx_stats_id;
2259 	u32 pstats_addr;
2260 
2261 	memset(&pstats, 0, sizeof(pstats));
2262 	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
2263 		      CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
2264 	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
2265 
2266 	p_stats->sent_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
2267 	p_stats->sent_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
2268 	p_stats->sent_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
2269 	p_stats->sent_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
2270 	p_stats->sent_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
2271 	p_stats->sent_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
2272 }
2273 
2274 static int __qed_ll2_get_stats(void *cxt, u8 connection_handle,
2275 			       struct qed_ll2_stats *p_stats)
2276 {
2277 	struct qed_hwfn *p_hwfn = cxt;
2278 	struct qed_ll2_info *p_ll2_conn = NULL;
2279 	struct qed_ptt *p_ptt;
2280 
2281 	if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
2282 	    !p_hwfn->p_ll2_info)
2283 		return -EINVAL;
2284 
2285 	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
2286 
2287 	p_ptt = qed_ptt_acquire(p_hwfn);
2288 	if (!p_ptt) {
2289 		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
2290 		return -EINVAL;
2291 	}
2292 
2293 	if (p_ll2_conn->input.gsi_enable)
2294 		_qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats);
2295 
2296 	_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
2297 
2298 	_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
2299 
2300 	if (p_ll2_conn->tx_stats_en)
2301 		_qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
2302 
2303 	qed_ptt_release(p_hwfn, p_ptt);
2304 
2305 	return 0;
2306 }
2307 
2308 int qed_ll2_get_stats(void *cxt,
2309 		      u8 connection_handle, struct qed_ll2_stats *p_stats)
2310 {
2311 	memset(p_stats, 0, sizeof(*p_stats));
2312 	return __qed_ll2_get_stats(cxt, connection_handle, p_stats);
2313 }
2314 
2315 static void qed_ll2b_release_rx_packet(void *cxt,
2316 				       u8 connection_handle,
2317 				       void *cookie,
2318 				       dma_addr_t rx_buf_addr,
2319 				       bool b_last_packet)
2320 {
2321 	struct qed_hwfn *p_hwfn = cxt;
2322 
2323 	qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie);
2324 }
2325 
2326 static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
2327 				    const struct qed_ll2_cb_ops *ops,
2328 				    void *cookie)
2329 {
2330 	cdev->ll2->cbs = ops;
2331 	cdev->ll2->cb_cookie = cookie;
2332 }
2333 
2334 struct qed_ll2_cbs ll2_cbs = {
2335 	.rx_comp_cb = &qed_ll2b_complete_rx_packet,
2336 	.rx_release_cb = &qed_ll2b_release_rx_packet,
2337 	.tx_comp_cb = &qed_ll2b_complete_tx_packet,
2338 	.tx_release_cb = &qed_ll2b_complete_tx_packet,
2339 };
2340 
2341 static void qed_ll2_set_conn_data(struct qed_hwfn *p_hwfn,
2342 				  struct qed_ll2_acquire_data *data,
2343 				  struct qed_ll2_params *params,
2344 				  enum qed_ll2_conn_type conn_type,
2345 				  u8 *handle, bool lb)
2346 {
2347 	memset(data, 0, sizeof(*data));
2348 
2349 	data->input.conn_type = conn_type;
2350 	data->input.mtu = params->mtu;
2351 	data->input.rx_num_desc = QED_LL2_RX_SIZE;
2352 	data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets;
2353 	data->input.rx_vlan_removal_en = params->rx_vlan_stripping;
2354 	data->input.tx_num_desc = QED_LL2_TX_SIZE;
2355 	data->p_connection_handle = handle;
2356 	data->cbs = &ll2_cbs;
2357 	ll2_cbs.cookie = p_hwfn;
2358 
2359 	if (lb) {
2360 		data->input.tx_tc = PKT_LB_TC;
2361 		data->input.tx_dest = QED_LL2_TX_DEST_LB;
2362 	} else {
2363 		data->input.tx_tc = 0;
2364 		data->input.tx_dest = QED_LL2_TX_DEST_NW;
2365 	}
2366 }
2367 
2368 static int qed_ll2_start_ooo(struct qed_hwfn *p_hwfn,
2369 			     struct qed_ll2_params *params)
2370 {
2371 	u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
2372 	struct qed_ll2_acquire_data data;
2373 	int rc;
2374 
2375 	qed_ll2_set_conn_data(p_hwfn, &data, params,
2376 			      QED_LL2_TYPE_OOO, handle, true);
2377 
2378 	rc = qed_ll2_acquire_connection(p_hwfn, &data);
2379 	if (rc) {
2380 		DP_INFO(p_hwfn, "Failed to acquire LL2 OOO connection\n");
2381 		goto out;
2382 	}
2383 
2384 	rc = qed_ll2_establish_connection(p_hwfn, *handle);
2385 	if (rc) {
2386 		DP_INFO(p_hwfn, "Failed to establish LL2 OOO connection\n");
2387 		goto fail;
2388 	}
2389 
2390 	return 0;
2391 
2392 fail:
2393 	qed_ll2_release_connection(p_hwfn, *handle);
2394 out:
2395 	*handle = QED_LL2_UNUSED_HANDLE;
2396 	return rc;
2397 }
2398 
2399 static bool qed_ll2_is_storage_eng1(struct qed_dev *cdev)
2400 {
2401 	return (QED_IS_FCOE_PERSONALITY(QED_LEADING_HWFN(cdev)) ||
2402 		QED_IS_ISCSI_PERSONALITY(QED_LEADING_HWFN(cdev))) &&
2403 		(QED_AFFIN_HWFN(cdev) != QED_LEADING_HWFN(cdev));
2404 }
2405 
2406 static int __qed_ll2_stop(struct qed_hwfn *p_hwfn)
2407 {
2408 	struct qed_dev *cdev = p_hwfn->cdev;
2409 	int rc;
2410 
2411 	rc = qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle);
2412 	if (rc)
2413 		DP_INFO(cdev, "Failed to terminate LL2 connection\n");
2414 
2415 	qed_ll2_release_connection(p_hwfn, cdev->ll2->handle);
2416 
2417 	return rc;
2418 }
2419 
2420 static int qed_ll2_stop(struct qed_dev *cdev)
2421 {
2422 	bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
2423 	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
2424 	int rc = 0, rc2 = 0;
2425 
2426 	if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
2427 		return 0;
2428 
2429 	qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address);
2430 	eth_zero_addr(cdev->ll2_mac_address);
2431 
2432 	if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
2433 		qed_ll2_stop_ooo(p_hwfn);
2434 
2435 	/* In CMT mode, LL2 is always started on engine 0 for a storage PF */
2436 	if (b_is_storage_eng1) {
2437 		rc2 = __qed_ll2_stop(QED_LEADING_HWFN(cdev));
2438 		if (rc2)
2439 			DP_NOTICE(QED_LEADING_HWFN(cdev),
2440 				  "Failed to stop LL2 on engine 0\n");
2441 	}
2442 
2443 	rc = __qed_ll2_stop(p_hwfn);
2444 	if (rc)
2445 		DP_NOTICE(p_hwfn, "Failed to stop LL2\n");
2446 
2447 	qed_ll2_kill_buffers(cdev);
2448 
2449 	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
2450 
2451 	return rc | rc2;
2452 }
2453 
2454 static int __qed_ll2_start(struct qed_hwfn *p_hwfn,
2455 			   struct qed_ll2_params *params)
2456 {
2457 	struct qed_ll2_buffer *buffer, *tmp_buffer;
2458 	struct qed_dev *cdev = p_hwfn->cdev;
2459 	enum qed_ll2_conn_type conn_type;
2460 	struct qed_ll2_acquire_data data;
2461 	int rc, rx_cnt;
2462 
2463 	switch (p_hwfn->hw_info.personality) {
2464 	case QED_PCI_FCOE:
2465 		conn_type = QED_LL2_TYPE_FCOE;
2466 		break;
2467 	case QED_PCI_ISCSI:
2468 		conn_type = QED_LL2_TYPE_ISCSI;
2469 		break;
2470 	case QED_PCI_ETH_ROCE:
2471 		conn_type = QED_LL2_TYPE_ROCE;
2472 		break;
2473 	default:
2474 
2475 		conn_type = QED_LL2_TYPE_TEST;
2476 	}
2477 
2478 	qed_ll2_set_conn_data(p_hwfn, &data, params, conn_type,
2479 			      &cdev->ll2->handle, false);
2480 
2481 	rc = qed_ll2_acquire_connection(p_hwfn, &data);
2482 	if (rc) {
2483 		DP_INFO(p_hwfn, "Failed to acquire LL2 connection\n");
2484 		return rc;
2485 	}
2486 
2487 	rc = qed_ll2_establish_connection(p_hwfn, cdev->ll2->handle);
2488 	if (rc) {
2489 		DP_INFO(p_hwfn, "Failed to establish LL2 connection\n");
2490 		goto release_conn;
2491 	}
2492 
2493 	/* Post all Rx buffers to FW */
2494 	spin_lock_bh(&cdev->ll2->lock);
2495 	rx_cnt = cdev->ll2->rx_cnt;
2496 	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
2497 		rc = qed_ll2_post_rx_buffer(p_hwfn,
2498 					    cdev->ll2->handle,
2499 					    buffer->phys_addr, 0, buffer, 1);
2500 		if (rc) {
2501 			DP_INFO(p_hwfn,
2502 				"Failed to post an Rx buffer; Deleting it\n");
2503 			dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
2504 					 cdev->ll2->rx_size, DMA_FROM_DEVICE);
2505 			kfree(buffer->data);
2506 			list_del(&buffer->list);
2507 			kfree(buffer);
2508 		} else {
2509 			rx_cnt++;
2510 		}
2511 	}
2512 	spin_unlock_bh(&cdev->ll2->lock);
2513 
2514 	if (rx_cnt == cdev->ll2->rx_cnt) {
2515 		DP_NOTICE(p_hwfn, "Failed passing even a single Rx buffer\n");
2516 		goto terminate_conn;
2517 	}
2518 	cdev->ll2->rx_cnt = rx_cnt;
2519 
2520 	return 0;
2521 
2522 terminate_conn:
2523 	qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle);
2524 release_conn:
2525 	qed_ll2_release_connection(p_hwfn, cdev->ll2->handle);
2526 	return rc;
2527 }
2528 
2529 static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
2530 {
2531 	bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
2532 	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
2533 	struct qed_ll2_buffer *buffer;
2534 	int rx_num_desc, i, rc;
2535 
2536 	if (!is_valid_ether_addr(params->ll2_mac_address)) {
2537 		DP_NOTICE(cdev, "Invalid Ethernet address\n");
2538 		return -EINVAL;
2539 	}
2540 
2541 	WARN_ON(!cdev->ll2->cbs);
2542 
2543 	/* Initialize LL2 locks & lists */
2544 	INIT_LIST_HEAD(&cdev->ll2->list);
2545 	spin_lock_init(&cdev->ll2->lock);
2546 
2547 	cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
2548 			     L1_CACHE_BYTES + params->mtu;
2549 
2550 	/* Allocate memory for LL2.
2551 	 * In CMT mode, in case of a storage PF which is affintized to engine 1,
2552 	 * LL2 is started also on engine 0 and thus we need twofold buffers.
2553 	 */
2554 	rx_num_desc = QED_LL2_RX_SIZE * (b_is_storage_eng1 ? 2 : 1);
2555 	DP_INFO(cdev, "Allocating %d LL2 buffers of size %08x bytes\n",
2556 		rx_num_desc, cdev->ll2->rx_size);
2557 	for (i = 0; i < rx_num_desc; i++) {
2558 		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
2559 		if (!buffer) {
2560 			DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
2561 			rc = -ENOMEM;
2562 			goto err0;
2563 		}
2564 
2565 		rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
2566 					  &buffer->phys_addr);
2567 		if (rc) {
2568 			kfree(buffer);
2569 			goto err0;
2570 		}
2571 
2572 		list_add_tail(&buffer->list, &cdev->ll2->list);
2573 	}
2574 
2575 	rc = __qed_ll2_start(p_hwfn, params);
2576 	if (rc) {
2577 		DP_NOTICE(cdev, "Failed to start LL2\n");
2578 		goto err0;
2579 	}
2580 
2581 	/* In CMT mode, always need to start LL2 on engine 0 for a storage PF,
2582 	 * since broadcast/mutlicast packets are routed to engine 0.
2583 	 */
2584 	if (b_is_storage_eng1) {
2585 		rc = __qed_ll2_start(QED_LEADING_HWFN(cdev), params);
2586 		if (rc) {
2587 			DP_NOTICE(QED_LEADING_HWFN(cdev),
2588 				  "Failed to start LL2 on engine 0\n");
2589 			goto err1;
2590 		}
2591 	}
2592 
2593 	if (QED_IS_ISCSI_PERSONALITY(p_hwfn)) {
2594 		DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
2595 		rc = qed_ll2_start_ooo(p_hwfn, params);
2596 		if (rc) {
2597 			DP_NOTICE(cdev, "Failed to start OOO LL2\n");
2598 			goto err2;
2599 		}
2600 	}
2601 
2602 	rc = qed_llh_add_mac_filter(cdev, 0, params->ll2_mac_address);
2603 	if (rc) {
2604 		DP_NOTICE(cdev, "Failed to add an LLH filter\n");
2605 		goto err3;
2606 	}
2607 
2608 	ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
2609 
2610 	return 0;
2611 
2612 err3:
2613 	if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
2614 		qed_ll2_stop_ooo(p_hwfn);
2615 err2:
2616 	if (b_is_storage_eng1)
2617 		__qed_ll2_stop(QED_LEADING_HWFN(cdev));
2618 err1:
2619 	__qed_ll2_stop(p_hwfn);
2620 err0:
2621 	qed_ll2_kill_buffers(cdev);
2622 	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
2623 	return rc;
2624 }
2625 
2626 static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
2627 			      unsigned long xmit_flags)
2628 {
2629 	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
2630 	struct qed_ll2_tx_pkt_info pkt;
2631 	const skb_frag_t *frag;
2632 	u8 flags = 0, nr_frags;
2633 	int rc = -EINVAL, i;
2634 	dma_addr_t mapping;
2635 	u16 vlan = 0;
2636 
2637 	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
2638 		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
2639 		return -EINVAL;
2640 	}
2641 
2642 	/* Cache number of fragments from SKB since SKB may be freed by
2643 	 * the completion routine after calling qed_ll2_prepare_tx_packet()
2644 	 */
2645 	nr_frags = skb_shinfo(skb)->nr_frags;
2646 
2647 	if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
2648 		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
2649 		       1 + nr_frags);
2650 		return -EINVAL;
2651 	}
2652 
2653 	mapping = dma_map_single(&cdev->pdev->dev, skb->data,
2654 				 skb->len, DMA_TO_DEVICE);
2655 	if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
2656 		DP_NOTICE(cdev, "SKB mapping failed\n");
2657 		return -EINVAL;
2658 	}
2659 
2660 	/* Request HW to calculate IP csum */
2661 	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
2662 	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2663 		flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
2664 
2665 	if (skb_vlan_tag_present(skb)) {
2666 		vlan = skb_vlan_tag_get(skb);
2667 		flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
2668 	}
2669 
2670 	memset(&pkt, 0, sizeof(pkt));
2671 	pkt.num_of_bds = 1 + nr_frags;
2672 	pkt.vlan = vlan;
2673 	pkt.bd_flags = flags;
2674 	pkt.tx_dest = QED_LL2_TX_DEST_NW;
2675 	pkt.first_frag = mapping;
2676 	pkt.first_frag_len = skb->len;
2677 	pkt.cookie = skb;
2678 	if (test_bit(QED_MF_UFP_SPECIFIC, &cdev->mf_bits) &&
2679 	    test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
2680 		pkt.remove_stag = true;
2681 
2682 	/* qed_ll2_prepare_tx_packet() may actually send the packet if
2683 	 * there are no fragments in the skb and subsequently the completion
2684 	 * routine may run and free the SKB, so no dereferencing the SKB
2685 	 * beyond this point unless skb has any fragments.
2686 	 */
2687 	rc = qed_ll2_prepare_tx_packet(p_hwfn, cdev->ll2->handle,
2688 				       &pkt, 1);
2689 	if (rc)
2690 		goto err;
2691 
2692 	for (i = 0; i < nr_frags; i++) {
2693 		frag = &skb_shinfo(skb)->frags[i];
2694 
2695 		mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
2696 					   skb_frag_size(frag), DMA_TO_DEVICE);
2697 
2698 		if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
2699 			DP_NOTICE(cdev,
2700 				  "Unable to map frag - dropping packet\n");
2701 			rc = -ENOMEM;
2702 			goto err;
2703 		}
2704 
2705 		rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
2706 						       cdev->ll2->handle,
2707 						       mapping,
2708 						       skb_frag_size(frag));
2709 
2710 		/* if failed not much to do here, partial packet has been posted
2711 		 * we can't free memory, will need to wait for completion
2712 		 */
2713 		if (rc)
2714 			goto err2;
2715 	}
2716 
2717 	return 0;
2718 
2719 err:
2720 	dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
2721 err2:
2722 	return rc;
2723 }
2724 
2725 static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
2726 {
2727 	bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
2728 	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
2729 	int rc;
2730 
2731 	if (!cdev->ll2)
2732 		return -EINVAL;
2733 
2734 	rc = qed_ll2_get_stats(p_hwfn, cdev->ll2->handle, stats);
2735 	if (rc) {
2736 		DP_NOTICE(p_hwfn, "Failed to get LL2 stats\n");
2737 		return rc;
2738 	}
2739 
2740 	/* In CMT mode, LL2 is always started on engine 0 for a storage PF */
2741 	if (b_is_storage_eng1) {
2742 		rc = __qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
2743 					 cdev->ll2->handle, stats);
2744 		if (rc) {
2745 			DP_NOTICE(QED_LEADING_HWFN(cdev),
2746 				  "Failed to get LL2 stats on engine 0\n");
2747 			return rc;
2748 		}
2749 	}
2750 
2751 	return 0;
2752 }
2753 
2754 const struct qed_ll2_ops qed_ll2_ops_pass = {
2755 	.start = &qed_ll2_start,
2756 	.stop = &qed_ll2_stop,
2757 	.start_xmit = &qed_ll2_start_xmit,
2758 	.register_cb_ops = &qed_ll2_register_cb_ops,
2759 	.get_stats = &qed_ll2_stats,
2760 };
2761 
2762 int qed_ll2_alloc_if(struct qed_dev *cdev)
2763 {
2764 	cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
2765 	return cdev->ll2 ? 0 : -ENOMEM;
2766 }
2767 
2768 void qed_ll2_dealloc_if(struct qed_dev *cdev)
2769 {
2770 	kfree(cdev->ll2);
2771 	cdev->ll2 = NULL;
2772 }
2773