1 /* QLogic qed NIC Driver
2  *
3  * Copyright (c) 2015 QLogic Corporation
4  *
5  * This software is available under the terms of the GNU General Public License
6  * (GPL) Version 2, available from the file COPYING in the main directory of
7  * this source tree.
8  */
9 
10 #include <linux/types.h>
11 #include <asm/byteorder.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/if_vlan.h>
14 #include <linux/kernel.h>
15 #include <linux/pci.h>
16 #include <linux/slab.h>
17 #include <linux/stddef.h>
18 #include <linux/version.h>
19 #include <linux/workqueue.h>
20 #include <net/ipv6.h>
21 #include <linux/bitops.h>
22 #include <linux/delay.h>
23 #include <linux/errno.h>
24 #include <linux/etherdevice.h>
25 #include <linux/io.h>
26 #include <linux/list.h>
27 #include <linux/mutex.h>
28 #include <linux/spinlock.h>
29 #include <linux/string.h>
30 #include <linux/qed/qed_ll2_if.h>
31 #include "qed.h"
32 #include "qed_cxt.h"
33 #include "qed_dev_api.h"
34 #include "qed_hsi.h"
35 #include "qed_hw.h"
36 #include "qed_int.h"
37 #include "qed_ll2.h"
38 #include "qed_mcp.h"
39 #include "qed_reg_addr.h"
40 #include "qed_sp.h"
41 #include "qed_roce.h"
42 
43 #define QED_LL2_RX_REGISTERED(ll2)	((ll2)->rx_queue.b_cb_registred)
44 #define QED_LL2_TX_REGISTERED(ll2)	((ll2)->tx_queue.b_cb_registred)
45 
46 #define QED_LL2_TX_SIZE (256)
47 #define QED_LL2_RX_SIZE (4096)
48 
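/* Per-device LL2 state used by the qed_ll2_ops interface below: tracks the
 * pool of posted Rx buffers, their size, the acquired connection handle and
 * the callbacks registered by the upper-layer driver.
 */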
49 struct qed_cb_ll2_info {
50 	int rx_cnt;
51 	u32 rx_size;
52 	u8 handle;
53 	bool frags_mapped;
54 
	/* Protects the LL2 buffer lists in atomic (non-sleeping) context */
56 	spinlock_t lock;
57 	struct list_head list;
58 
59 	const struct qed_ll2_cb_ops *cbs;
60 	void *cb_cookie;
61 };
62 
63 struct qed_ll2_buffer {
64 	struct list_head list;
65 	void *data;
66 	dma_addr_t phys_addr;
67 };
68 
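/* Tx completion for skbs submitted through qed_ll2_start_xmit(): unmap the
 * skb head, report the completion through the registered tx_cb and free the
 * skb.
 */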
69 static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
70 					u8 connection_handle,
71 					void *cookie,
72 					dma_addr_t first_frag_addr,
73 					bool b_last_fragment,
74 					bool b_last_packet)
75 {
76 	struct qed_dev *cdev = p_hwfn->cdev;
77 	struct sk_buff *skb = cookie;
78 
79 	/* All we need to do is release the mapping */
80 	dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
81 			 skb_headlen(skb), DMA_TO_DEVICE);
82 
83 	if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
84 		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
85 				      b_last_fragment);
86 
87 	if (cdev->ll2->frags_mapped)
88 		/* Case where mapped frags were received, need to
89 		 * free skb with nr_frags marked as 0
90 		 */
91 		skb_shinfo(skb)->nr_frags = 0;
92 
93 	dev_kfree_skb_any(skb);
94 }
95 
96 static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
97 				u8 **data, dma_addr_t *phys_addr)
98 {
99 	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
100 	if (!(*data)) {
101 		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
102 		return -ENOMEM;
103 	}
104 
105 	*phys_addr = dma_map_single(&cdev->pdev->dev,
106 				    ((*data) + NET_SKB_PAD),
107 				    cdev->ll2->rx_size, DMA_FROM_DEVICE);
108 	if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
109 		DP_INFO(cdev, "Failed to map LL2 buffer data\n");
110 		kfree((*data));
111 		return -ENOMEM;
112 	}
113 
114 	return 0;
115 }
116 
117 static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
118 				 struct qed_ll2_buffer *buffer)
119 {
120 	spin_lock_bh(&cdev->ll2->lock);
121 
122 	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
123 			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
124 	kfree(buffer->data);
125 	list_del(&buffer->list);
126 
127 	cdev->ll2->rx_cnt--;
128 	if (!cdev->ll2->rx_cnt)
129 		DP_INFO(cdev, "All LL2 entries were removed\n");
130 
131 	spin_unlock_bh(&cdev->ll2->lock);
132 
133 	return 0;
134 }
135 
136 static void qed_ll2_kill_buffers(struct qed_dev *cdev)
137 {
138 	struct qed_ll2_buffer *buffer, *tmp_buffer;
139 
140 	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
141 		qed_ll2_dealloc_buffer(cdev, buffer);
142 }
143 
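/* Rx completion for the qed_ll2_ops path: allocate a replacement buffer, wrap
 * the completed data in an skb for the registered rx_cb and repost a buffer
 * to the firmware (reusing the old one upon any failure).
 */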
144 static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
145 					u8 connection_handle,
146 					struct qed_ll2_rx_packet *p_pkt,
147 					struct core_rx_fast_path_cqe *p_cqe,
148 					bool b_last_packet)
149 {
150 	u16 packet_length = le16_to_cpu(p_cqe->packet_length);
151 	struct qed_ll2_buffer *buffer = p_pkt->cookie;
152 	struct qed_dev *cdev = p_hwfn->cdev;
153 	u16 vlan = le16_to_cpu(p_cqe->vlan);
154 	u32 opaque_data_0, opaque_data_1;
155 	u8 pad = p_cqe->placement_offset;
156 	dma_addr_t new_phys_addr;
157 	struct sk_buff *skb;
158 	bool reuse = false;
159 	int rc = -EINVAL;
160 	u8 *new_data;
161 
162 	opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]);
163 	opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]);
164 
165 	DP_VERBOSE(p_hwfn,
166 		   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
167 		   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
168 		   (u64)p_pkt->rx_buf_addr, pad, packet_length,
169 		   le16_to_cpu(p_cqe->parse_flags.flags), vlan,
170 		   opaque_data_0, opaque_data_1);
171 
172 	if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
173 		print_hex_dump(KERN_INFO, "",
174 			       DUMP_PREFIX_OFFSET, 16, 1,
175 			       buffer->data, packet_length, false);
176 	}
177 
178 	/* Determine if data is valid */
179 	if (packet_length < ETH_HLEN)
180 		reuse = true;
181 
182 	/* Allocate a replacement for buffer; Reuse upon failure */
183 	if (!reuse)
184 		rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
185 					  &new_phys_addr);
186 
	/* If we must reuse or have no replacement buffer, repost this one */
188 	if (rc)
189 		goto out_post;
190 
191 	skb = build_skb(buffer->data, 0);
192 	if (!skb) {
193 		rc = -ENOMEM;
194 		goto out_post;
195 	}
196 
197 	pad += NET_SKB_PAD;
198 	skb_reserve(skb, pad);
199 	skb_put(skb, packet_length);
200 	skb_checksum_none_assert(skb);
201 
	/* Get partial Ethernet information instead of eth_type_trans(),
	 * since we don't have an associated net_device.
	 */
205 	skb_reset_mac_header(skb);
206 	skb->protocol = eth_hdr(skb)->h_proto;
207 
208 	/* Pass SKB onward */
209 	if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
210 		if (vlan)
211 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
212 		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
213 				      opaque_data_0, opaque_data_1);
214 	}
215 
216 	/* Update Buffer information and update FW producer */
217 	buffer->data = new_data;
218 	buffer->phys_addr = new_phys_addr;
219 
220 out_post:
221 	rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
222 				    buffer->phys_addr, 0,  buffer, 1);
223 
224 	if (rc)
225 		qed_ll2_dealloc_buffer(cdev, buffer);
226 }
227 
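/* Translate a connection handle into its qed_ll2_info entry, optionally
 * taking the connection mutex and/or requiring the connection to be active.
 */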
228 static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
229 						    u8 connection_handle,
230 						    bool b_lock,
231 						    bool b_only_active)
232 {
233 	struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;
234 
235 	if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
236 		return NULL;
237 
238 	if (!p_hwfn->p_ll2_info)
239 		return NULL;
240 
241 	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
242 
243 	if (b_only_active) {
244 		if (b_lock)
245 			mutex_lock(&p_ll2_conn->mutex);
246 		if (p_ll2_conn->b_active)
247 			p_ret = p_ll2_conn;
248 		if (b_lock)
249 			mutex_unlock(&p_ll2_conn->mutex);
250 	} else {
251 		p_ret = p_ll2_conn;
252 	}
253 
254 	return p_ret;
255 }
256 
257 static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
258 						  u8 connection_handle)
259 {
260 	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
261 }
262 
263 static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
264 						       u8 connection_handle)
265 {
266 	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
267 }
268 
269 static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
270 							   *p_hwfn,
271 							   u8 connection_handle)
272 {
273 	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
274 }
275 
276 static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
277 {
278 	bool b_last_packet = false, b_last_frag = false;
279 	struct qed_ll2_tx_packet *p_pkt = NULL;
280 	struct qed_ll2_info *p_ll2_conn;
281 	struct qed_ll2_tx_queue *p_tx;
282 	dma_addr_t tx_frag;
283 
284 	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
285 	if (!p_ll2_conn)
286 		return;
287 
288 	p_tx = &p_ll2_conn->tx_queue;
289 
290 	while (!list_empty(&p_tx->active_descq)) {
291 		p_pkt = list_first_entry(&p_tx->active_descq,
292 					 struct qed_ll2_tx_packet, list_entry);
293 		if (!p_pkt)
294 			break;
295 
296 		list_del(&p_pkt->list_entry);
297 		b_last_packet = list_empty(&p_tx->active_descq);
298 		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
299 		p_tx->cur_completing_packet = *p_pkt;
300 		p_tx->cur_completing_bd_idx = 1;
301 		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
302 		tx_frag = p_pkt->bds_set[0].tx_frag;
303 		if (p_ll2_conn->gsi_enable)
304 			qed_ll2b_release_tx_gsi_packet(p_hwfn,
305 						       p_ll2_conn->my_id,
306 						       p_pkt->cookie,
307 						       tx_frag,
308 						       b_last_frag,
309 						       b_last_packet);
310 		else
311 			qed_ll2b_complete_tx_packet(p_hwfn,
312 						    p_ll2_conn->my_id,
313 						    p_pkt->cookie,
314 						    tx_frag,
315 						    b_last_frag,
316 						    b_last_packet);
317 
318 	}
319 }
320 
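/* Tx completion handler registered with the status block: consume the BDs
 * acknowledged by the firmware consumer and complete each packet, dropping
 * the Tx spinlock around the per-packet completion callbacks.
 */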
321 static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
322 {
323 	struct qed_ll2_info *p_ll2_conn = p_cookie;
324 	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
325 	u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
326 	struct qed_ll2_tx_packet *p_pkt;
327 	bool b_last_frag = false;
328 	unsigned long flags;
329 	dma_addr_t tx_frag;
330 	int rc = -EINVAL;
331 
332 	spin_lock_irqsave(&p_tx->lock, flags);
333 	if (p_tx->b_completing_packet) {
334 		rc = -EBUSY;
335 		goto out;
336 	}
337 
338 	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
339 	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
340 	while (num_bds) {
341 		if (list_empty(&p_tx->active_descq))
342 			goto out;
343 
344 		p_pkt = list_first_entry(&p_tx->active_descq,
345 					 struct qed_ll2_tx_packet, list_entry);
346 		if (!p_pkt)
347 			goto out;
348 
349 		p_tx->b_completing_packet = true;
350 		p_tx->cur_completing_packet = *p_pkt;
351 		num_bds_in_packet = p_pkt->bd_used;
352 		list_del(&p_pkt->list_entry);
353 
354 		if (num_bds < num_bds_in_packet) {
355 			DP_NOTICE(p_hwfn,
356 				  "Rest of BDs does not cover whole packet\n");
357 			goto out;
358 		}
359 
360 		num_bds -= num_bds_in_packet;
361 		p_tx->bds_idx += num_bds_in_packet;
362 		while (num_bds_in_packet--)
363 			qed_chain_consume(&p_tx->txq_chain);
364 
365 		p_tx->cur_completing_bd_idx = 1;
366 		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
367 		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
368 
369 		spin_unlock_irqrestore(&p_tx->lock, flags);
370 		tx_frag = p_pkt->bds_set[0].tx_frag;
371 		if (p_ll2_conn->gsi_enable)
372 			qed_ll2b_complete_tx_gsi_packet(p_hwfn,
373 							p_ll2_conn->my_id,
374 							p_pkt->cookie,
375 							tx_frag,
376 							b_last_frag, !num_bds);
377 		else
378 			qed_ll2b_complete_tx_packet(p_hwfn,
379 						    p_ll2_conn->my_id,
380 						    p_pkt->cookie,
381 						    tx_frag,
382 						    b_last_frag, !num_bds);
383 		spin_lock_irqsave(&p_tx->lock, flags);
384 	}
385 
386 	p_tx->b_completing_packet = false;
387 	rc = 0;
388 out:
389 	spin_unlock_irqrestore(&p_tx->lock, flags);
390 	return rc;
391 }
392 
393 static int
394 qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
395 			   struct qed_ll2_info *p_ll2_info,
396 			   union core_rx_cqe_union *p_cqe,
397 			   unsigned long lock_flags, bool b_last_cqe)
398 {
399 	struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
400 	struct qed_ll2_rx_packet *p_pkt = NULL;
401 	u16 packet_length, parse_flags, vlan;
402 	u32 src_mac_addrhi;
403 	u16 src_mac_addrlo;
404 
405 	if (!list_empty(&p_rx->active_descq))
406 		p_pkt = list_first_entry(&p_rx->active_descq,
407 					 struct qed_ll2_rx_packet, list_entry);
408 	if (!p_pkt) {
409 		DP_NOTICE(p_hwfn,
410 			  "GSI Rx completion but active_descq is empty\n");
411 		return -EIO;
412 	}
413 
414 	list_del(&p_pkt->list_entry);
415 	parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
416 	packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
417 	vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
418 	src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
419 	src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
420 	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
421 		DP_NOTICE(p_hwfn,
422 			  "Mismatch between active_descq and the LL2 Rx chain\n");
423 	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
424 
425 	spin_unlock_irqrestore(&p_rx->lock, lock_flags);
426 	qed_ll2b_complete_rx_gsi_packet(p_hwfn,
427 					p_ll2_info->my_id,
428 					p_pkt->cookie,
429 					p_pkt->rx_buf_addr,
430 					packet_length,
431 					p_cqe->rx_cqe_gsi.data_length_error,
432 					parse_flags,
433 					vlan,
434 					src_mac_addrhi,
435 					src_mac_addrlo, b_last_cqe);
436 	spin_lock_irqsave(&p_rx->lock, lock_flags);
437 
438 	return 0;
439 }
440 
441 static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
442 				      struct qed_ll2_info *p_ll2_conn,
443 				      union core_rx_cqe_union *p_cqe,
444 				      unsigned long lock_flags,
445 				      bool b_last_cqe)
446 {
447 	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
448 	struct qed_ll2_rx_packet *p_pkt = NULL;
449 
450 	if (!list_empty(&p_rx->active_descq))
451 		p_pkt = list_first_entry(&p_rx->active_descq,
452 					 struct qed_ll2_rx_packet, list_entry);
453 	if (!p_pkt) {
454 		DP_NOTICE(p_hwfn,
455 			  "LL2 Rx completion but active_descq is empty\n");
456 		return -EIO;
457 	}
458 	list_del(&p_pkt->list_entry);
459 
460 	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
461 		DP_NOTICE(p_hwfn,
462 			  "Mismatch between active_descq and the LL2 Rx chain\n");
463 	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
464 
465 	spin_unlock_irqrestore(&p_rx->lock, lock_flags);
466 	qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
467 				    p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
468 	spin_lock_irqsave(&p_rx->lock, lock_flags);
469 
470 	return 0;
471 }
472 
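/* Rx completion handler registered with the status block: consume CQEs from
 * the RCQ chain and dispatch them to the GSI or regular completion routine
 * according to the CQE type.
 */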
473 static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
474 {
475 	struct qed_ll2_info *p_ll2_conn = cookie;
476 	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
477 	union core_rx_cqe_union *cqe = NULL;
478 	u16 cq_new_idx = 0, cq_old_idx = 0;
479 	unsigned long flags = 0;
480 	int rc = 0;
481 
482 	spin_lock_irqsave(&p_rx->lock, flags);
483 	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
484 	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
485 
486 	while (cq_new_idx != cq_old_idx) {
487 		bool b_last_cqe = (cq_new_idx == cq_old_idx);
488 
489 		cqe = qed_chain_consume(&p_rx->rcq_chain);
490 		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
491 
492 		DP_VERBOSE(p_hwfn,
493 			   QED_MSG_LL2,
494 			   "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
495 			   cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);
496 
497 		switch (cqe->rx_cqe_sp.type) {
498 		case CORE_RX_CQE_TYPE_SLOW_PATH:
499 			DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
500 			rc = -EINVAL;
501 			break;
502 		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
503 			rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
504 							cqe, flags, b_last_cqe);
505 			break;
506 		case CORE_RX_CQE_TYPE_REGULAR:
507 			rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
508 							cqe, flags, b_last_cqe);
509 			break;
510 		default:
511 			rc = -EIO;
512 		}
513 	}
514 
515 	spin_unlock_irqrestore(&p_rx->lock, flags);
516 	return rc;
517 }
518 
519 static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
520 {
521 	struct qed_ll2_info *p_ll2_conn = NULL;
522 	struct qed_ll2_rx_packet *p_pkt = NULL;
523 	struct qed_ll2_rx_queue *p_rx;
524 
525 	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
526 	if (!p_ll2_conn)
527 		return;
528 
529 	p_rx = &p_ll2_conn->rx_queue;
530 
531 	while (!list_empty(&p_rx->active_descq)) {
532 		dma_addr_t rx_buf_addr;
533 		void *cookie;
534 		bool b_last;
535 
536 		p_pkt = list_first_entry(&p_rx->active_descq,
537 					 struct qed_ll2_rx_packet, list_entry);
538 		if (!p_pkt)
539 			break;
540 
541 		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
542 
543 		rx_buf_addr = p_pkt->rx_buf_addr;
544 		cookie = p_pkt->cookie;
545 
546 		b_last = list_empty(&p_rx->active_descq);
547 	}
548 }
549 
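/* Post the CORE_RAMROD_RX_QUEUE_START ramrod, describing the Rx BD chain,
 * the CQE PBL and the error-handling policy of this connection.
 */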
550 static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
551 				     struct qed_ll2_info *p_ll2_conn,
552 				     u8 action_on_error)
553 {
554 	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
555 	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
556 	struct core_rx_start_ramrod_data *p_ramrod = NULL;
557 	struct qed_spq_entry *p_ent = NULL;
558 	struct qed_sp_init_data init_data;
559 	u16 cqe_pbl_size;
560 	int rc = 0;
561 
562 	/* Get SPQ entry */
563 	memset(&init_data, 0, sizeof(init_data));
564 	init_data.cid = p_ll2_conn->cid;
565 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
566 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
567 
568 	rc = qed_sp_init_request(p_hwfn, &p_ent,
569 				 CORE_RAMROD_RX_QUEUE_START,
570 				 PROTOCOLID_CORE, &init_data);
571 	if (rc)
572 		return rc;
573 
574 	p_ramrod = &p_ent->ramrod.core_rx_queue_start;
575 
576 	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
577 	p_ramrod->sb_index = p_rx->rx_sb_index;
578 	p_ramrod->complete_event_flg = 1;
579 
580 	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
581 	DMA_REGPAIR_LE(p_ramrod->bd_base,
582 		       p_rx->rxq_chain.p_phys_addr);
583 	cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
584 	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
585 	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
586 		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));
587 
588 	p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg;
589 	p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en;
590 	p_ramrod->queue_id = p_ll2_conn->queue_id;
591 	p_ramrod->main_func_queue = 1;
592 
593 	if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
594 	    p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
595 		p_ramrod->mf_si_bcast_accept_all = 1;
596 		p_ramrod->mf_si_mcast_accept_all = 1;
597 	} else {
598 		p_ramrod->mf_si_bcast_accept_all = 0;
599 		p_ramrod->mf_si_mcast_accept_all = 0;
600 	}
601 
602 	p_ramrod->action_on_error.error_type = action_on_error;
603 	p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
604 	return qed_spq_post(p_hwfn, p_ent, NULL);
605 }
606 
607 static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
608 				     struct qed_ll2_info *p_ll2_conn)
609 {
610 	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
611 	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
612 	struct core_tx_start_ramrod_data *p_ramrod = NULL;
613 	struct qed_spq_entry *p_ent = NULL;
614 	struct qed_sp_init_data init_data;
615 	union qed_qm_pq_params pq_params;
616 	u16 pq_id = 0, pbl_size;
617 	int rc = -EINVAL;
618 
619 	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
620 		return 0;
621 
622 	/* Get SPQ entry */
623 	memset(&init_data, 0, sizeof(init_data));
624 	init_data.cid = p_ll2_conn->cid;
625 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
626 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
627 
628 	rc = qed_sp_init_request(p_hwfn, &p_ent,
629 				 CORE_RAMROD_TX_QUEUE_START,
630 				 PROTOCOLID_CORE, &init_data);
631 	if (rc)
632 		return rc;
633 
634 	p_ramrod = &p_ent->ramrod.core_tx_queue_start;
635 
636 	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
637 	p_ramrod->sb_index = p_tx->tx_sb_index;
638 	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
639 	p_ll2_conn->tx_stats_en = 1;
640 	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
641 	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
642 
643 	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
644 		       qed_chain_get_pbl_phys(&p_tx->txq_chain));
645 	pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
646 	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
647 
648 	memset(&pq_params, 0, sizeof(pq_params));
649 	pq_params.core.tc = p_ll2_conn->tx_tc;
650 	pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
651 	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
652 
653 	switch (conn_type) {
654 	case QED_LL2_TYPE_ISCSI:
655 	case QED_LL2_TYPE_ISCSI_OOO:
656 		p_ramrod->conn_type = PROTOCOLID_ISCSI;
657 		break;
658 	case QED_LL2_TYPE_ROCE:
659 		p_ramrod->conn_type = PROTOCOLID_ROCE;
660 		break;
661 	default:
662 		p_ramrod->conn_type = PROTOCOLID_ETH;
663 		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
664 	}
665 
666 	p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
667 	return qed_spq_post(p_hwfn, p_ent, NULL);
668 }
669 
670 static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
671 				    struct qed_ll2_info *p_ll2_conn)
672 {
673 	struct core_rx_stop_ramrod_data *p_ramrod = NULL;
674 	struct qed_spq_entry *p_ent = NULL;
675 	struct qed_sp_init_data init_data;
676 	int rc = -EINVAL;
677 
678 	/* Get SPQ entry */
679 	memset(&init_data, 0, sizeof(init_data));
680 	init_data.cid = p_ll2_conn->cid;
681 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
682 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
683 
684 	rc = qed_sp_init_request(p_hwfn, &p_ent,
685 				 CORE_RAMROD_RX_QUEUE_STOP,
686 				 PROTOCOLID_CORE, &init_data);
687 	if (rc)
688 		return rc;
689 
690 	p_ramrod = &p_ent->ramrod.core_rx_queue_stop;
691 
692 	p_ramrod->complete_event_flg = 1;
693 	p_ramrod->queue_id = p_ll2_conn->queue_id;
694 
695 	return qed_spq_post(p_hwfn, p_ent, NULL);
696 }
697 
698 static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
699 				    struct qed_ll2_info *p_ll2_conn)
700 {
701 	struct qed_spq_entry *p_ent = NULL;
702 	struct qed_sp_init_data init_data;
703 	int rc = -EINVAL;
704 
705 	/* Get SPQ entry */
706 	memset(&init_data, 0, sizeof(init_data));
707 	init_data.cid = p_ll2_conn->cid;
708 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
709 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
710 
711 	rc = qed_sp_init_request(p_hwfn, &p_ent,
712 				 CORE_RAMROD_TX_QUEUE_STOP,
713 				 PROTOCOLID_CORE, &init_data);
714 	if (rc)
715 		return rc;
716 
717 	return qed_spq_post(p_hwfn, p_ent, NULL);
718 }
719 
720 static int
721 qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
722 			      struct qed_ll2_info *p_ll2_info, u16 rx_num_desc)
723 {
724 	struct qed_ll2_rx_packet *p_descq;
725 	u32 capacity;
726 	int rc = 0;
727 
728 	if (!rx_num_desc)
729 		goto out;
730 
731 	rc = qed_chain_alloc(p_hwfn->cdev,
732 			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
733 			     QED_CHAIN_MODE_NEXT_PTR,
734 			     QED_CHAIN_CNT_TYPE_U16,
735 			     rx_num_desc,
736 			     sizeof(struct core_rx_bd),
737 			     &p_ll2_info->rx_queue.rxq_chain);
738 	if (rc) {
739 		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
740 		goto out;
741 	}
742 
743 	capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
744 	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
745 			  GFP_KERNEL);
746 	if (!p_descq) {
747 		rc = -ENOMEM;
748 		DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
749 		goto out;
750 	}
751 	p_ll2_info->rx_queue.descq_array = p_descq;
752 
753 	rc = qed_chain_alloc(p_hwfn->cdev,
754 			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
755 			     QED_CHAIN_MODE_PBL,
756 			     QED_CHAIN_CNT_TYPE_U16,
757 			     rx_num_desc,
758 			     sizeof(struct core_rx_fast_path_cqe),
759 			     &p_ll2_info->rx_queue.rcq_chain);
760 	if (rc) {
761 		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
762 		goto out;
763 	}
764 
765 	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
766 		   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
767 		   p_ll2_info->conn_type, rx_num_desc);
768 
769 out:
770 	return rc;
771 }
772 
773 static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
774 					 struct qed_ll2_info *p_ll2_info,
775 					 u16 tx_num_desc)
776 {
777 	struct qed_ll2_tx_packet *p_descq;
778 	u32 capacity;
779 	int rc = 0;
780 
781 	if (!tx_num_desc)
782 		goto out;
783 
784 	rc = qed_chain_alloc(p_hwfn->cdev,
785 			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
786 			     QED_CHAIN_MODE_PBL,
787 			     QED_CHAIN_CNT_TYPE_U16,
788 			     tx_num_desc,
789 			     sizeof(struct core_tx_bd),
790 			     &p_ll2_info->tx_queue.txq_chain);
791 	if (rc)
792 		goto out;
793 
794 	capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
795 	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
796 			  GFP_KERNEL);
797 	if (!p_descq) {
798 		rc = -ENOMEM;
799 		goto out;
800 	}
801 	p_ll2_info->tx_queue.descq_array = p_descq;
802 
803 	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
804 		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
805 		   p_ll2_info->conn_type, tx_num_desc);
806 
807 out:
808 	if (rc)
809 		DP_NOTICE(p_hwfn,
810 			  "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
811 			  tx_num_desc);
812 	return rc;
813 }
814 
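/* Reserve a free LL2 connection slot, store the requested parameters,
 * allocate the Rx/Tx chains and register the queue completion callbacks.
 * On success the slot index is returned through p_connection_handle.
 */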
815 int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
816 			       struct qed_ll2_info *p_params,
817 			       u16 rx_num_desc,
818 			       u16 tx_num_desc,
819 			       u8 *p_connection_handle)
820 {
821 	qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
822 	struct qed_ll2_info *p_ll2_info = NULL;
823 	int rc;
824 	u8 i;
825 
826 	if (!p_connection_handle || !p_hwfn->p_ll2_info)
827 		return -EINVAL;
828 
829 	/* Find a free connection to be used */
830 	for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
831 		mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
832 		if (p_hwfn->p_ll2_info[i].b_active) {
833 			mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
834 			continue;
835 		}
836 
837 		p_hwfn->p_ll2_info[i].b_active = true;
838 		p_ll2_info = &p_hwfn->p_ll2_info[i];
839 		mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
840 		break;
841 	}
842 	if (!p_ll2_info)
843 		return -EBUSY;
844 
845 	p_ll2_info->conn_type = p_params->conn_type;
846 	p_ll2_info->mtu = p_params->mtu;
847 	p_ll2_info->rx_drop_ttl0_flg = p_params->rx_drop_ttl0_flg;
848 	p_ll2_info->rx_vlan_removal_en = p_params->rx_vlan_removal_en;
849 	p_ll2_info->tx_tc = p_params->tx_tc;
850 	p_ll2_info->tx_dest = p_params->tx_dest;
851 	p_ll2_info->ai_err_packet_too_big = p_params->ai_err_packet_too_big;
852 	p_ll2_info->ai_err_no_buf = p_params->ai_err_no_buf;
853 	p_ll2_info->gsi_enable = p_params->gsi_enable;
854 
855 	rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
856 	if (rc)
857 		goto q_allocate_fail;
858 
859 	rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc);
860 	if (rc)
861 		goto q_allocate_fail;
862 
863 	/* Register callbacks for the Rx/Tx queues */
864 	comp_rx_cb = qed_ll2_rxq_completion;
865 	comp_tx_cb = qed_ll2_txq_completion;
866 
867 	if (rx_num_desc) {
868 		qed_int_register_cb(p_hwfn, comp_rx_cb,
869 				    &p_hwfn->p_ll2_info[i],
870 				    &p_ll2_info->rx_queue.rx_sb_index,
871 				    &p_ll2_info->rx_queue.p_fw_cons);
872 		p_ll2_info->rx_queue.b_cb_registred = true;
873 	}
874 
875 	if (tx_num_desc) {
876 		qed_int_register_cb(p_hwfn,
877 				    comp_tx_cb,
878 				    &p_hwfn->p_ll2_info[i],
879 				    &p_ll2_info->tx_queue.tx_sb_index,
880 				    &p_ll2_info->tx_queue.p_fw_cons);
881 		p_ll2_info->tx_queue.b_cb_registred = true;
882 	}
883 
884 	*p_connection_handle = i;
885 	return rc;
886 
887 q_allocate_fail:
888 	qed_ll2_release_connection(p_hwfn, i);
889 	return -ENOMEM;
890 }
891 
892 static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
893 					   struct qed_ll2_info *p_ll2_conn)
894 {
895 	u8 action_on_error = 0;
896 
897 	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
898 		return 0;
899 
900 	DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
901 
902 	SET_FIELD(action_on_error,
903 		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
904 		  p_ll2_conn->ai_err_packet_too_big);
905 	SET_FIELD(action_on_error,
906 		  CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->ai_err_no_buf);
907 
908 	return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
909 }
910 
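/* Bring up an acquired connection: reset the chains and descriptor lists,
 * acquire a CID, compute the producer/doorbell addresses and post the
 * Rx/Tx queue-start ramrods.
 */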
911 int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
912 {
913 	struct qed_ll2_info *p_ll2_conn;
914 	struct qed_ll2_rx_queue *p_rx;
915 	struct qed_ll2_tx_queue *p_tx;
916 	int rc = -EINVAL;
917 	u32 i, capacity;
918 	u8 qid;
919 
920 	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
921 	if (!p_ll2_conn)
922 		return -EINVAL;
923 	p_rx = &p_ll2_conn->rx_queue;
924 	p_tx = &p_ll2_conn->tx_queue;
925 
926 	qed_chain_reset(&p_rx->rxq_chain);
927 	qed_chain_reset(&p_rx->rcq_chain);
928 	INIT_LIST_HEAD(&p_rx->active_descq);
929 	INIT_LIST_HEAD(&p_rx->free_descq);
930 	INIT_LIST_HEAD(&p_rx->posting_descq);
931 	spin_lock_init(&p_rx->lock);
932 	capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
933 	for (i = 0; i < capacity; i++)
934 		list_add_tail(&p_rx->descq_array[i].list_entry,
935 			      &p_rx->free_descq);
936 	*p_rx->p_fw_cons = 0;
937 
938 	qed_chain_reset(&p_tx->txq_chain);
939 	INIT_LIST_HEAD(&p_tx->active_descq);
940 	INIT_LIST_HEAD(&p_tx->free_descq);
941 	INIT_LIST_HEAD(&p_tx->sending_descq);
942 	spin_lock_init(&p_tx->lock);
943 	capacity = qed_chain_get_capacity(&p_tx->txq_chain);
944 	for (i = 0; i < capacity; i++)
945 		list_add_tail(&p_tx->descq_array[i].list_entry,
946 			      &p_tx->free_descq);
947 	p_tx->cur_completing_bd_idx = 0;
948 	p_tx->bds_idx = 0;
949 	p_tx->b_completing_packet = false;
950 	p_tx->cur_send_packet = NULL;
951 	p_tx->cur_send_frag_num = 0;
952 	p_tx->cur_completing_frag_num = 0;
953 	*p_tx->p_fw_cons = 0;
954 
955 	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
956 
957 	qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
958 	p_ll2_conn->queue_id = qid;
959 	p_ll2_conn->tx_stats_id = qid;
960 	p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
961 					    GTT_BAR0_MAP_REG_TSDM_RAM +
962 					    TSTORM_LL2_RX_PRODS_OFFSET(qid);
963 	p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
964 					    qed_db_addr(p_ll2_conn->cid,
965 							DQ_DEMS_LEGACY);
966 
967 	rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
968 	if (rc)
969 		return rc;
970 
971 	rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
972 	if (rc)
973 		return rc;
974 
975 	if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
976 		qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);
977 
978 	return rc;
979 }
980 
981 static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
982 					     struct qed_ll2_rx_queue *p_rx,
983 					     struct qed_ll2_rx_packet *p_curp)
984 {
985 	struct qed_ll2_rx_packet *p_posting_packet = NULL;
986 	struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
987 	bool b_notify_fw = false;
988 	u16 bd_prod, cq_prod;
989 
990 	/* This handles the flushing of already posted buffers */
991 	while (!list_empty(&p_rx->posting_descq)) {
992 		p_posting_packet = list_first_entry(&p_rx->posting_descq,
993 						    struct qed_ll2_rx_packet,
994 						    list_entry);
995 		list_move_tail(&p_posting_packet->list_entry,
996 			       &p_rx->active_descq);
997 		b_notify_fw = true;
998 	}
999 
1000 	/* This handles the supplied packet [if there is one] */
1001 	if (p_curp) {
1002 		list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
1003 		b_notify_fw = true;
1004 	}
1005 
1006 	if (!b_notify_fw)
1007 		return;
1008 
1009 	bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
1010 	cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
1011 	rx_prod.bd_prod = cpu_to_le16(bd_prod);
1012 	rx_prod.cqe_prod = cpu_to_le16(cq_prod);
1013 	DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
1014 }
1015 
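/* Post a single Rx buffer on a connection: fill an Rx BD from the free
 * descriptor list and either update the firmware producers immediately or
 * keep the buffer on the posting list until a later notification.
 */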
1016 int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
1017 			   u8 connection_handle,
1018 			   dma_addr_t addr,
1019 			   u16 buf_len, void *cookie, u8 notify_fw)
1020 {
1021 	struct core_rx_bd_with_buff_len *p_curb = NULL;
1022 	struct qed_ll2_rx_packet *p_curp = NULL;
1023 	struct qed_ll2_info *p_ll2_conn;
1024 	struct qed_ll2_rx_queue *p_rx;
1025 	unsigned long flags;
1026 	void *p_data;
1027 	int rc = 0;
1028 
1029 	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1030 	if (!p_ll2_conn)
1031 		return -EINVAL;
1032 	p_rx = &p_ll2_conn->rx_queue;
1033 
1034 	spin_lock_irqsave(&p_rx->lock, flags);
1035 	if (!list_empty(&p_rx->free_descq))
1036 		p_curp = list_first_entry(&p_rx->free_descq,
1037 					  struct qed_ll2_rx_packet, list_entry);
1038 	if (p_curp) {
1039 		if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
1040 		    qed_chain_get_elem_left(&p_rx->rcq_chain)) {
1041 			p_data = qed_chain_produce(&p_rx->rxq_chain);
1042 			p_curb = (struct core_rx_bd_with_buff_len *)p_data;
1043 			qed_chain_produce(&p_rx->rcq_chain);
1044 		}
1045 	}
1046 
	/* If we're lacking entries, let's try to flush buffers to FW */
1048 	if (!p_curp || !p_curb) {
1049 		rc = -EBUSY;
1050 		p_curp = NULL;
1051 		goto out_notify;
1052 	}
1053 
1054 	/* We have an Rx packet we can fill */
1055 	DMA_REGPAIR_LE(p_curb->addr, addr);
1056 	p_curb->buff_length = cpu_to_le16(buf_len);
1057 	p_curp->rx_buf_addr = addr;
1058 	p_curp->cookie = cookie;
1059 	p_curp->rxq_bd = p_curb;
1060 	p_curp->buf_length = buf_len;
1061 	list_del(&p_curp->list_entry);
1062 
1063 	/* Check if we only want to enqueue this packet without informing FW */
1064 	if (!notify_fw) {
1065 		list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
1066 		goto out;
1067 	}
1068 
1069 out_notify:
1070 	qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
1071 out:
1072 	spin_unlock_irqrestore(&p_rx->lock, flags);
1073 	return rc;
1074 }
1075 
1076 static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
1077 					  struct qed_ll2_tx_queue *p_tx,
1078 					  struct qed_ll2_tx_packet *p_curp,
1079 					  u8 num_of_bds,
1080 					  dma_addr_t first_frag,
1081 					  u16 first_frag_len, void *p_cookie,
1082 					  u8 notify_fw)
1083 {
1084 	list_del(&p_curp->list_entry);
1085 	p_curp->cookie = p_cookie;
1086 	p_curp->bd_used = num_of_bds;
1087 	p_curp->notify_fw = notify_fw;
1088 	p_tx->cur_send_packet = p_curp;
1089 	p_tx->cur_send_frag_num = 0;
1090 
1091 	p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag;
1092 	p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len;
1093 	p_tx->cur_send_frag_num++;
1094 }
1095 
1096 static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
1097 					     struct qed_ll2_info *p_ll2,
1098 					     struct qed_ll2_tx_packet *p_curp,
1099 					     u8 num_of_bds,
1100 					     enum core_tx_dest tx_dest,
1101 					     u16 vlan,
1102 					     u8 bd_flags,
1103 					     u16 l4_hdr_offset_w,
1104 					     enum core_roce_flavor_type type,
1105 					     dma_addr_t first_frag,
1106 					     u16 first_frag_len)
1107 {
1108 	struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
1109 	u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
1110 	struct core_tx_bd *start_bd = NULL;
1111 	u16 frag_idx;
1112 
1113 	start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
1114 	start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
1115 	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
1116 		  cpu_to_le16(l4_hdr_offset_w));
1117 	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
1118 	start_bd->bd_flags.as_bitfield = bd_flags;
1119 	start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
1120 	    CORE_TX_BD_FLAGS_START_BD_SHIFT;
1121 	SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
1122 	DMA_REGPAIR_LE(start_bd->addr, first_frag);
1123 	start_bd->nbytes = cpu_to_le16(first_frag_len);
1124 
1125 	DP_VERBOSE(p_hwfn,
1126 		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1127 		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
1128 		   p_ll2->queue_id,
1129 		   p_ll2->cid,
1130 		   p_ll2->conn_type,
1131 		   prod_idx,
1132 		   first_frag_len,
1133 		   num_of_bds,
1134 		   le32_to_cpu(start_bd->addr.hi),
1135 		   le32_to_cpu(start_bd->addr.lo));
1136 
1137 	if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds)
1138 		return;
1139 
1140 	/* Need to provide the packet with additional BDs for frags */
1141 	for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
1142 	     frag_idx < num_of_bds; frag_idx++) {
1143 		struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
1144 
1145 		*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
1146 		(*p_bd)->bd_flags.as_bitfield = 0;
1147 		(*p_bd)->bitfield1 = 0;
1148 		(*p_bd)->bitfield0 = 0;
1149 		p_curp->bds_set[frag_idx].tx_frag = 0;
1150 		p_curp->bds_set[frag_idx].frag_len = 0;
1151 	}
1152 }
1153 
1154 /* This should be called while the Txq spinlock is being held */
1155 static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
1156 				     struct qed_ll2_info *p_ll2_conn)
1157 {
1158 	bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
1159 	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
1160 	struct qed_ll2_tx_packet *p_pkt = NULL;
1161 	struct core_db_data db_msg = { 0, 0, 0 };
1162 	u16 bd_prod;
1163 
1164 	/* If there are missing BDs, don't do anything now */
1165 	if (p_ll2_conn->tx_queue.cur_send_frag_num !=
1166 	    p_ll2_conn->tx_queue.cur_send_packet->bd_used)
1167 		return;
1168 
	/* Queue the current packet for sending and reset the send state */
1170 	list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
1171 		      &p_ll2_conn->tx_queue.sending_descq);
1172 	p_ll2_conn->tx_queue.cur_send_packet = NULL;
1173 	p_ll2_conn->tx_queue.cur_send_frag_num = 0;
1174 
1175 	/* Notify FW of packet only if requested to */
1176 	if (!b_notify)
1177 		return;
1178 
1179 	bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);
1180 
1181 	while (!list_empty(&p_tx->sending_descq)) {
1182 		p_pkt = list_first_entry(&p_tx->sending_descq,
1183 					 struct qed_ll2_tx_packet, list_entry);
1184 		if (!p_pkt)
1185 			break;
1186 
1187 		list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
1188 	}
1189 
1190 	SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
1191 	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
1192 	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
1193 		  DQ_XCM_CORE_TX_BD_PROD_CMD);
1194 	db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
1195 	db_msg.spq_prod = cpu_to_le16(bd_prod);
1196 
	/* Make sure the BD data is updated before ringing the doorbell */
1198 	wmb();
1199 
1200 	DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));
1201 
1202 	DP_VERBOSE(p_hwfn,
1203 		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1204 		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
1205 		   p_ll2_conn->queue_id,
1206 		   p_ll2_conn->cid, p_ll2_conn->conn_type, db_msg.spq_prod);
1207 }
1208 
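/* Start transmitting a packet: claim a Tx descriptor, build the first BD
 * (plus empty BDs for any remaining fragments) and ring the doorbell once
 * all fragments have been supplied.
 */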
1209 int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
1210 			      u8 connection_handle,
1211 			      u8 num_of_bds,
1212 			      u16 vlan,
1213 			      u8 bd_flags,
1214 			      u16 l4_hdr_offset_w,
1215 			      enum qed_ll2_roce_flavor_type qed_roce_flavor,
1216 			      dma_addr_t first_frag,
1217 			      u16 first_frag_len, void *cookie, u8 notify_fw)
1218 {
1219 	struct qed_ll2_tx_packet *p_curp = NULL;
1220 	struct qed_ll2_info *p_ll2_conn = NULL;
1221 	enum core_roce_flavor_type roce_flavor;
1222 	struct qed_ll2_tx_queue *p_tx;
1223 	struct qed_chain *p_tx_chain;
1224 	unsigned long flags;
1225 	int rc = 0;
1226 
1227 	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1228 	if (!p_ll2_conn)
1229 		return -EINVAL;
1230 	p_tx = &p_ll2_conn->tx_queue;
1231 	p_tx_chain = &p_tx->txq_chain;
1232 
1233 	if (num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
1234 		return -EIO;
1235 
1236 	spin_lock_irqsave(&p_tx->lock, flags);
1237 	if (p_tx->cur_send_packet) {
1238 		rc = -EEXIST;
1239 		goto out;
1240 	}
1241 
1242 	/* Get entry, but only if we have tx elements for it */
1243 	if (!list_empty(&p_tx->free_descq))
1244 		p_curp = list_first_entry(&p_tx->free_descq,
1245 					  struct qed_ll2_tx_packet, list_entry);
1246 	if (p_curp && qed_chain_get_elem_left(p_tx_chain) < num_of_bds)
1247 		p_curp = NULL;
1248 
1249 	if (!p_curp) {
1250 		rc = -EBUSY;
1251 		goto out;
1252 	}
1253 
1254 	if (qed_roce_flavor == QED_LL2_ROCE) {
1255 		roce_flavor = CORE_ROCE;
1256 	} else if (qed_roce_flavor == QED_LL2_RROCE) {
1257 		roce_flavor = CORE_RROCE;
1258 	} else {
1259 		rc = -EINVAL;
1260 		goto out;
1261 	}
1262 
1263 	/* Prepare packet and BD, and perhaps send a doorbell to FW */
1264 	qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp,
1265 				      num_of_bds, first_frag,
1266 				      first_frag_len, cookie, notify_fw);
1267 	qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
1268 					 num_of_bds, CORE_TX_DEST_NW,
1269 					 vlan, bd_flags, l4_hdr_offset_w,
1270 					 roce_flavor,
1271 					 first_frag, first_frag_len);
1272 
1273 	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
1274 
1275 out:
1276 	spin_unlock_irqrestore(&p_tx->lock, flags);
1277 	return rc;
1278 }
1279 
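/* Fill the next fragment BD of the packet started by
 * qed_ll2_prepare_tx_packet() and ring the doorbell once the last expected
 * fragment has been set.
 */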
1280 int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
1281 				      u8 connection_handle,
1282 				      dma_addr_t addr, u16 nbytes)
1283 {
1284 	struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
1285 	struct qed_ll2_info *p_ll2_conn = NULL;
1286 	u16 cur_send_frag_num = 0;
1287 	struct core_tx_bd *p_bd;
1288 	unsigned long flags;
1289 
1290 	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1291 	if (!p_ll2_conn)
1292 		return -EINVAL;
1293 
1294 	if (!p_ll2_conn->tx_queue.cur_send_packet)
1295 		return -EINVAL;
1296 
1297 	p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
1298 	cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
1299 
1300 	if (cur_send_frag_num >= p_cur_send_packet->bd_used)
1301 		return -EINVAL;
1302 
1303 	/* Fill the BD information, and possibly notify FW */
1304 	p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
1305 	DMA_REGPAIR_LE(p_bd->addr, addr);
1306 	p_bd->nbytes = cpu_to_le16(nbytes);
1307 	p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
1308 	p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
1309 
1310 	p_ll2_conn->tx_queue.cur_send_frag_num++;
1311 
1312 	spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
1313 	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
1314 	spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);
1315 
1316 	return 0;
1317 }
1318 
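/* Stop the Tx and Rx queues of an established connection and flush any
 * descriptors still held by the driver.
 */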
1319 int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
1320 {
1321 	struct qed_ll2_info *p_ll2_conn = NULL;
1322 	int rc = -EINVAL;
1323 
1324 	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
1325 	if (!p_ll2_conn)
1326 		return -EINVAL;
1327 
1328 	/* Stop Tx & Rx of connection, if needed */
1329 	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1330 		rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
1331 		if (rc)
1332 			return rc;
1333 		qed_ll2_txq_flush(p_hwfn, connection_handle);
1334 	}
1335 
1336 	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1337 		rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
1338 		if (rc)
1339 			return rc;
1340 		qed_ll2_rxq_flush(p_hwfn, connection_handle);
1341 	}
1342 
1343 	return rc;
1344 }
1345 
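/* Release a connection slot: unregister the status-block callbacks, free the
 * Rx/Tx chains and descriptor arrays, release the CID and mark the slot
 * inactive.
 */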
1346 void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
1347 {
1348 	struct qed_ll2_info *p_ll2_conn = NULL;
1349 
1350 	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1351 	if (!p_ll2_conn)
1352 		return;
1353 
1354 	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1355 		p_ll2_conn->rx_queue.b_cb_registred = false;
1356 		qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
1357 	}
1358 
1359 	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1360 		p_ll2_conn->tx_queue.b_cb_registred = false;
1361 		qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
1362 	}
1363 
1364 	kfree(p_ll2_conn->tx_queue.descq_array);
1365 	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
1366 
1367 	kfree(p_ll2_conn->rx_queue.descq_array);
1368 	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
1369 	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);
1370 
1371 	qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
1372 
1373 	mutex_lock(&p_ll2_conn->mutex);
1374 	p_ll2_conn->b_active = false;
1375 	mutex_unlock(&p_ll2_conn->mutex);
1376 }
1377 
1378 struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn)
1379 {
1380 	struct qed_ll2_info *p_ll2_connections;
1381 	u8 i;
1382 
	/* Allocate the array of LL2 connection structures */
1384 	p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
1385 				    sizeof(struct qed_ll2_info), GFP_KERNEL);
1386 	if (!p_ll2_connections) {
1387 		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
1388 		return NULL;
1389 	}
1390 
1391 	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
1392 		p_ll2_connections[i].my_id = i;
1393 
1394 	return p_ll2_connections;
1395 }
1396 
1397 void qed_ll2_setup(struct qed_hwfn *p_hwfn,
1398 		   struct qed_ll2_info *p_ll2_connections)
1399 {
1400 	int i;
1401 
1402 	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
1403 		mutex_init(&p_ll2_connections[i].mutex);
1404 }
1405 
1406 void qed_ll2_free(struct qed_hwfn *p_hwfn,
1407 		  struct qed_ll2_info *p_ll2_connections)
1408 {
1409 	kfree(p_ll2_connections);
1410 }
1411 
1412 static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
1413 				struct qed_ptt *p_ptt,
1414 				struct qed_ll2_info *p_ll2_conn,
1415 				struct qed_ll2_stats *p_stats)
1416 {
1417 	struct core_ll2_tstorm_per_queue_stat tstats;
1418 	u8 qid = p_ll2_conn->queue_id;
1419 	u32 tstats_addr;
1420 
1421 	memset(&tstats, 0, sizeof(tstats));
1422 	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1423 		      CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
1424 	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
1425 
1426 	p_stats->packet_too_big_discard =
1427 			HILO_64_REGPAIR(tstats.packet_too_big_discard);
1428 	p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
1429 }
1430 
1431 static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
1432 				struct qed_ptt *p_ptt,
1433 				struct qed_ll2_info *p_ll2_conn,
1434 				struct qed_ll2_stats *p_stats)
1435 {
1436 	struct core_ll2_ustorm_per_queue_stat ustats;
1437 	u8 qid = p_ll2_conn->queue_id;
1438 	u32 ustats_addr;
1439 
1440 	memset(&ustats, 0, sizeof(ustats));
1441 	ustats_addr = BAR0_MAP_REG_USDM_RAM +
1442 		      CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
1443 	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
1444 
1445 	p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1446 	p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1447 	p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1448 	p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1449 	p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1450 	p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1451 }
1452 
1453 static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
1454 				struct qed_ptt *p_ptt,
1455 				struct qed_ll2_info *p_ll2_conn,
1456 				struct qed_ll2_stats *p_stats)
1457 {
1458 	struct core_ll2_pstorm_per_queue_stat pstats;
1459 	u8 stats_id = p_ll2_conn->tx_stats_id;
1460 	u32 pstats_addr;
1461 
1462 	memset(&pstats, 0, sizeof(pstats));
1463 	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
1464 		      CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
1465 	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
1466 
1467 	p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1468 	p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1469 	p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1470 	p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1471 	p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1472 	p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1473 }
1474 
1475 int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
1476 		      u8 connection_handle, struct qed_ll2_stats *p_stats)
1477 {
1478 	struct qed_ll2_info *p_ll2_conn = NULL;
1479 	struct qed_ptt *p_ptt;
1480 
1481 	memset(p_stats, 0, sizeof(*p_stats));
1482 
1483 	if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
1484 	    !p_hwfn->p_ll2_info)
1485 		return -EINVAL;
1486 
1487 	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
1488 
1489 	p_ptt = qed_ptt_acquire(p_hwfn);
1490 	if (!p_ptt) {
1491 		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1492 		return -EINVAL;
1493 	}
1494 
1495 	_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
1496 	_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
1497 	if (p_ll2_conn->tx_stats_en)
1498 		_qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
1499 
1500 	qed_ptt_release(p_hwfn, p_ptt);
1501 	return 0;
1502 }
1503 
1504 static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
1505 				    const struct qed_ll2_cb_ops *ops,
1506 				    void *cookie)
1507 {
1508 	cdev->ll2->cbs = ops;
1509 	cdev->ll2->cb_cookie = cookie;
1510 }
1511 
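/* qed_ll2_ops->start: allocate and map the Rx buffer pool, acquire and
 * establish an LL2 connection on the leading hwfn, post the Rx buffers to
 * the firmware and add the LLH MAC filter.
 */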
1512 static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
1513 {
1514 	struct qed_ll2_info ll2_info;
1515 	struct qed_ll2_buffer *buffer, *tmp_buffer;
1516 	enum qed_ll2_conn_type conn_type;
1517 	struct qed_ptt *p_ptt;
1518 	int rc, i;
1519 
1520 	/* Initialize LL2 locks & lists */
1521 	INIT_LIST_HEAD(&cdev->ll2->list);
1522 	spin_lock_init(&cdev->ll2->lock);
1523 	cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
1524 			     L1_CACHE_BYTES + params->mtu;
1525 	cdev->ll2->frags_mapped = params->frags_mapped;
1526 
	/* Allocate memory for LL2 */
1528 	DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
1529 		cdev->ll2->rx_size);
1530 	for (i = 0; i < QED_LL2_RX_SIZE; i++) {
1531 		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
1532 		if (!buffer) {
1533 			DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
1534 			goto fail;
1535 		}
1536 
1537 		rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
1538 					  &buffer->phys_addr);
1539 		if (rc) {
1540 			kfree(buffer);
1541 			goto fail;
1542 		}
1543 
1544 		list_add_tail(&buffer->list, &cdev->ll2->list);
1545 	}
1546 
1547 	switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
1548 	case QED_PCI_ISCSI:
1549 		conn_type = QED_LL2_TYPE_ISCSI;
1550 		break;
1551 	case QED_PCI_ETH_ROCE:
1552 		conn_type = QED_LL2_TYPE_ROCE;
1553 		break;
1554 	default:
1555 		conn_type = QED_LL2_TYPE_TEST;
1556 	}
1557 
1558 	/* Prepare the temporary ll2 information */
1559 	memset(&ll2_info, 0, sizeof(ll2_info));
1560 	ll2_info.conn_type = conn_type;
1561 	ll2_info.mtu = params->mtu;
1562 	ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
1563 	ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
1564 	ll2_info.tx_tc = 0;
1565 	ll2_info.tx_dest = CORE_TX_DEST_NW;
1566 	ll2_info.gsi_enable = 1;
1567 
1568 	rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
1569 					QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
1570 					&cdev->ll2->handle);
1571 	if (rc) {
1572 		DP_INFO(cdev, "Failed to acquire LL2 connection\n");
1573 		goto fail;
1574 	}
1575 
1576 	rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
1577 					  cdev->ll2->handle);
1578 	if (rc) {
1579 		DP_INFO(cdev, "Failed to establish LL2 connection\n");
1580 		goto release_fail;
1581 	}
1582 
1583 	/* Post all Rx buffers to FW */
1584 	spin_lock_bh(&cdev->ll2->lock);
1585 	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
1586 		rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
1587 					    cdev->ll2->handle,
1588 					    buffer->phys_addr, 0, buffer, 1);
1589 		if (rc) {
1590 			DP_INFO(cdev,
1591 				"Failed to post an Rx buffer; Deleting it\n");
1592 			dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
1593 					 cdev->ll2->rx_size, DMA_FROM_DEVICE);
1594 			kfree(buffer->data);
1595 			list_del(&buffer->list);
1596 			kfree(buffer);
1597 		} else {
1598 			cdev->ll2->rx_cnt++;
1599 		}
1600 	}
1601 	spin_unlock_bh(&cdev->ll2->lock);
1602 
1603 	if (!cdev->ll2->rx_cnt) {
1604 		DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
1605 		goto release_terminate;
1606 	}
1607 
1608 	if (!is_valid_ether_addr(params->ll2_mac_address)) {
1609 		DP_INFO(cdev, "Invalid Ethernet address\n");
1610 		goto release_terminate;
1611 	}
1612 
1613 	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
1614 	if (!p_ptt) {
1615 		DP_INFO(cdev, "Failed to acquire PTT\n");
1616 		goto release_terminate;
1617 	}
1618 
1619 	rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
1620 				    params->ll2_mac_address);
1621 	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
1622 	if (rc) {
1623 		DP_ERR(cdev, "Failed to allocate LLH filter\n");
1624 		goto release_terminate_all;
1625 	}
1626 
1627 	ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
1628 
1629 	return 0;
1630 
1631 release_terminate_all:
1632 
1633 release_terminate:
1634 	qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
1635 release_fail:
1636 	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
1637 fail:
1638 	qed_ll2_kill_buffers(cdev);
1639 	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
1640 	return -EINVAL;
1641 }
1642 
1643 static int qed_ll2_stop(struct qed_dev *cdev)
1644 {
1645 	struct qed_ptt *p_ptt;
1646 	int rc;
1647 
1648 	if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
1649 		return 0;
1650 
1651 	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
1652 	if (!p_ptt) {
1653 		DP_INFO(cdev, "Failed to acquire PTT\n");
1654 		goto fail;
1655 	}
1656 
1657 	qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
1658 				  cdev->ll2_mac_address);
1659 	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
1660 	eth_zero_addr(cdev->ll2_mac_address);
1661 
1662 	rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
1663 					  cdev->ll2->handle);
1664 	if (rc)
1665 		DP_INFO(cdev, "Failed to terminate LL2 connection\n");
1666 
1667 	qed_ll2_kill_buffers(cdev);
1668 
1669 	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
1670 	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
1671 
1672 	return rc;
1673 fail:
1674 	return -EINVAL;
1675 }
1676 
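/* qed_ll2_ops->start_xmit: map the skb head (and each fragment, unless the
 * caller pre-mapped them) and submit the packet through the prepare /
 * set-fragment Tx API.
 */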
1677 static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
1678 {
1679 	const skb_frag_t *frag;
1680 	int rc = -EINVAL, i;
1681 	dma_addr_t mapping;
1682 	u16 vlan = 0;
1683 	u8 flags = 0;
1684 
1685 	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
1687 		return -EINVAL;
1688 	}
1689 
1690 	if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
1691 		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
1692 		       1 + skb_shinfo(skb)->nr_frags);
1693 		return -EINVAL;
1694 	}
1695 
1696 	mapping = dma_map_single(&cdev->pdev->dev, skb->data,
1697 				 skb->len, DMA_TO_DEVICE);
1698 	if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
1699 		DP_NOTICE(cdev, "SKB mapping failed\n");
1700 		return -EINVAL;
1701 	}
1702 
1703 	/* Request HW to calculate IP csum */
1704 	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
1705 	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
1706 		flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
1707 
1708 	if (skb_vlan_tag_present(skb)) {
1709 		vlan = skb_vlan_tag_get(skb);
1710 		flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
1711 	}
1712 
1713 	rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
1714 				       cdev->ll2->handle,
1715 				       1 + skb_shinfo(skb)->nr_frags,
1716 				       vlan, flags, 0, 0 /* RoCE FLAVOR */,
1717 				       mapping, skb->len, skb, 1);
1718 	if (rc)
1719 		goto err;
1720 
1721 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1722 		frag = &skb_shinfo(skb)->frags[i];
1723 		if (!cdev->ll2->frags_mapped) {
1724 			mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
1725 						   skb_frag_size(frag),
1726 						   DMA_TO_DEVICE);
1727 
1728 			if (unlikely(dma_mapping_error(&cdev->pdev->dev,
1729 						       mapping))) {
1730 				DP_NOTICE(cdev,
1731 					  "Unable to map frag - dropping packet\n");
1732 				goto err;
1733 			}
1734 		} else {
1735 			mapping = page_to_phys(skb_frag_page(frag)) |
1736 			    frag->page_offset;
1737 		}
1738 
1739 		rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
1740 						       cdev->ll2->handle,
1741 						       mapping,
1742 						       skb_frag_size(frag));
1743 
		/* If this fails there's not much to do: a partial packet has
		 * been posted and we can't free memory, so we must wait for
		 * the completion.
		 */
1747 		if (rc)
1748 			goto err2;
1749 	}
1750 
1751 	return 0;
1752 
1753 err:
1754 	dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
1755 
1756 err2:
1757 	return rc;
1758 }
1759 
1760 static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
1761 {
1762 	if (!cdev->ll2)
1763 		return -EINVAL;
1764 
1765 	return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
1766 				 cdev->ll2->handle, stats);
1767 }
1768 
1769 const struct qed_ll2_ops qed_ll2_ops_pass = {
1770 	.start = &qed_ll2_start,
1771 	.stop = &qed_ll2_stop,
1772 	.start_xmit = &qed_ll2_start_xmit,
1773 	.register_cb_ops = &qed_ll2_register_cb_ops,
1774 	.get_stats = &qed_ll2_stats,
1775 };
1776 
1777 int qed_ll2_alloc_if(struct qed_dev *cdev)
1778 {
1779 	cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
1780 	return cdev->ll2 ? 0 : -ENOMEM;
1781 }
1782 
1783 void qed_ll2_dealloc_if(struct qed_dev *cdev)
1784 {
1785 	kfree(cdev->ll2);
1786 	cdev->ll2 = NULL;
1787 }
1788