1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015-2017  QLogic Corporation
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and /or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/types.h>
34 #include <asm/byteorder.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/if_vlan.h>
37 #include <linux/kernel.h>
38 #include <linux/pci.h>
39 #include <linux/slab.h>
40 #include <linux/stddef.h>
41 #include <linux/version.h>
42 #include <linux/workqueue.h>
43 #include <net/ipv6.h>
44 #include <linux/bitops.h>
45 #include <linux/delay.h>
46 #include <linux/errno.h>
47 #include <linux/etherdevice.h>
48 #include <linux/io.h>
49 #include <linux/list.h>
50 #include <linux/mutex.h>
51 #include <linux/spinlock.h>
52 #include <linux/string.h>
53 #include <linux/qed/qed_ll2_if.h>
54 #include "qed.h"
55 #include "qed_cxt.h"
56 #include "qed_dev_api.h"
57 #include "qed_hsi.h"
58 #include "qed_hw.h"
59 #include "qed_int.h"
60 #include "qed_ll2.h"
61 #include "qed_mcp.h"
62 #include "qed_ooo.h"
63 #include "qed_reg_addr.h"
64 #include "qed_sp.h"
65 #include "qed_roce.h"
66 
67 #define QED_LL2_RX_REGISTERED(ll2)	((ll2)->rx_queue.b_cb_registred)
68 #define QED_LL2_TX_REGISTERED(ll2)	((ll2)->tx_queue.b_cb_registred)
69 
70 #define QED_LL2_TX_SIZE (256)
71 #define QED_LL2_RX_SIZE (4096)
72 
73 struct qed_cb_ll2_info {
74 	int rx_cnt;
75 	u32 rx_size;
76 	u8 handle;
77 	bool frags_mapped;
78 
79 	/* Lock protecting LL2 buffer lists in sleepless context */
80 	spinlock_t lock;
81 	struct list_head list;
82 
83 	const struct qed_ll2_cb_ops *cbs;
84 	void *cb_cookie;
85 };
86 
87 struct qed_ll2_buffer {
88 	struct list_head list;
89 	void *data;
90 	dma_addr_t phys_addr;
91 };
92 
93 static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
94 					u8 connection_handle,
95 					void *cookie,
96 					dma_addr_t first_frag_addr,
97 					bool b_last_fragment,
98 					bool b_last_packet)
99 {
100 	struct qed_dev *cdev = p_hwfn->cdev;
101 	struct sk_buff *skb = cookie;
102 
103 	/* All we need to do is release the mapping */
104 	dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
105 			 skb_headlen(skb), DMA_TO_DEVICE);
106 
107 	if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
108 		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
109 				      b_last_fragment);
110 
111 	if (cdev->ll2->frags_mapped)
112 		/* Case where mapped frags were received, need to
113 		 * free skb with nr_frags marked as 0
114 		 */
115 		skb_shinfo(skb)->nr_frags = 0;
116 
117 	dev_kfree_skb_any(skb);
118 }
119 
120 static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
121 				u8 **data, dma_addr_t *phys_addr)
122 {
123 	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
124 	if (!(*data)) {
125 		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
126 		return -ENOMEM;
127 	}
128 
129 	*phys_addr = dma_map_single(&cdev->pdev->dev,
130 				    ((*data) + NET_SKB_PAD),
131 				    cdev->ll2->rx_size, DMA_FROM_DEVICE);
132 	if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
133 		DP_INFO(cdev, "Failed to map LL2 buffer data\n");
134 		kfree((*data));
135 		return -ENOMEM;
136 	}
137 
138 	return 0;
139 }
140 
141 static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
142 				 struct qed_ll2_buffer *buffer)
143 {
144 	spin_lock_bh(&cdev->ll2->lock);
145 
146 	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
147 			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
148 	kfree(buffer->data);
149 	list_del(&buffer->list);
150 
151 	cdev->ll2->rx_cnt--;
152 	if (!cdev->ll2->rx_cnt)
153 		DP_INFO(cdev, "All LL2 entries were removed\n");
154 
155 	spin_unlock_bh(&cdev->ll2->lock);
156 
157 	return 0;
158 }
159 
160 static void qed_ll2_kill_buffers(struct qed_dev *cdev)
161 {
162 	struct qed_ll2_buffer *buffer, *tmp_buffer;
163 
164 	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
165 		qed_ll2_dealloc_buffer(cdev, buffer);
166 }
167 
168 static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
169 					u8 connection_handle,
170 					struct qed_ll2_rx_packet *p_pkt,
171 					struct core_rx_fast_path_cqe *p_cqe,
172 					bool b_last_packet)
173 {
174 	u16 packet_length = le16_to_cpu(p_cqe->packet_length);
175 	struct qed_ll2_buffer *buffer = p_pkt->cookie;
176 	struct qed_dev *cdev = p_hwfn->cdev;
177 	u16 vlan = le16_to_cpu(p_cqe->vlan);
178 	u32 opaque_data_0, opaque_data_1;
179 	u8 pad = p_cqe->placement_offset;
180 	dma_addr_t new_phys_addr;
181 	struct sk_buff *skb;
182 	bool reuse = false;
183 	int rc = -EINVAL;
184 	u8 *new_data;
185 
186 	opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]);
187 	opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]);
188 
189 	DP_VERBOSE(p_hwfn,
190 		   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
191 		   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
192 		   (u64)p_pkt->rx_buf_addr, pad, packet_length,
193 		   le16_to_cpu(p_cqe->parse_flags.flags), vlan,
194 		   opaque_data_0, opaque_data_1);
195 
196 	if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
197 		print_hex_dump(KERN_INFO, "",
198 			       DUMP_PREFIX_OFFSET, 16, 1,
199 			       buffer->data, packet_length, false);
200 	}
201 
202 	/* Determine if data is valid */
203 	if (packet_length < ETH_HLEN)
204 		reuse = true;
205 
206 	/* Allocate a replacement for buffer; Reuse upon failure */
207 	if (!reuse)
208 		rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
209 					  &new_phys_addr);
210 
211 	/* If need to reuse or there's no replacement buffer, repost this */
212 	if (rc)
213 		goto out_post;
214 	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
215 			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
216 
217 	skb = build_skb(buffer->data, 0);
218 	if (!skb) {
219 		rc = -ENOMEM;
220 		goto out_post;
221 	}
222 
223 	pad += NET_SKB_PAD;
224 	skb_reserve(skb, pad);
225 	skb_put(skb, packet_length);
226 	skb_checksum_none_assert(skb);
227 
228 	/* Get parital ethernet information instead of eth_type_trans(),
229 	 * Since we don't have an associated net_device.
230 	 */
231 	skb_reset_mac_header(skb);
232 	skb->protocol = eth_hdr(skb)->h_proto;
233 
234 	/* Pass SKB onward */
235 	if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
236 		if (vlan)
237 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
238 		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
239 				      opaque_data_0, opaque_data_1);
240 	}
241 
242 	/* Update Buffer information and update FW producer */
243 	buffer->data = new_data;
244 	buffer->phys_addr = new_phys_addr;
245 
246 out_post:
247 	rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
248 				    buffer->phys_addr, 0,  buffer, 1);
249 
250 	if (rc)
251 		qed_ll2_dealloc_buffer(cdev, buffer);
252 }
253 
254 static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
255 						    u8 connection_handle,
256 						    bool b_lock,
257 						    bool b_only_active)
258 {
259 	struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;
260 
261 	if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
262 		return NULL;
263 
264 	if (!p_hwfn->p_ll2_info)
265 		return NULL;
266 
267 	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
268 
269 	if (b_only_active) {
270 		if (b_lock)
271 			mutex_lock(&p_ll2_conn->mutex);
272 		if (p_ll2_conn->b_active)
273 			p_ret = p_ll2_conn;
274 		if (b_lock)
275 			mutex_unlock(&p_ll2_conn->mutex);
276 	} else {
277 		p_ret = p_ll2_conn;
278 	}
279 
280 	return p_ret;
281 }
282 
283 static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
284 						  u8 connection_handle)
285 {
286 	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
287 }
288 
289 static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
290 						       u8 connection_handle)
291 {
292 	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
293 }
294 
295 static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
296 							   *p_hwfn,
297 							   u8 connection_handle)
298 {
299 	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
300 }
301 
302 static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
303 {
304 	bool b_last_packet = false, b_last_frag = false;
305 	struct qed_ll2_tx_packet *p_pkt = NULL;
306 	struct qed_ll2_info *p_ll2_conn;
307 	struct qed_ll2_tx_queue *p_tx;
308 	dma_addr_t tx_frag;
309 
310 	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
311 	if (!p_ll2_conn)
312 		return;
313 
314 	p_tx = &p_ll2_conn->tx_queue;
315 
316 	while (!list_empty(&p_tx->active_descq)) {
317 		p_pkt = list_first_entry(&p_tx->active_descq,
318 					 struct qed_ll2_tx_packet, list_entry);
319 		if (!p_pkt)
320 			break;
321 
322 		list_del(&p_pkt->list_entry);
323 		b_last_packet = list_empty(&p_tx->active_descq);
324 		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
325 		if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
326 			struct qed_ooo_buffer *p_buffer;
327 
328 			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
329 			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
330 						p_buffer);
331 		} else {
332 			p_tx->cur_completing_packet = *p_pkt;
333 			p_tx->cur_completing_bd_idx = 1;
334 			b_last_frag =
335 				p_tx->cur_completing_bd_idx == p_pkt->bd_used;
336 			tx_frag = p_pkt->bds_set[0].tx_frag;
337 			if (p_ll2_conn->conn.gsi_enable)
338 				qed_ll2b_release_tx_gsi_packet(p_hwfn,
339 							       p_ll2_conn->
340 							       my_id,
341 							       p_pkt->cookie,
342 							       tx_frag,
343 							       b_last_frag,
344 							       b_last_packet);
345 			else
346 				qed_ll2b_complete_tx_packet(p_hwfn,
347 							    p_ll2_conn->my_id,
348 							    p_pkt->cookie,
349 							    tx_frag,
350 							    b_last_frag,
351 							    b_last_packet);
352 		}
353 	}
354 }
355 
356 static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
357 {
358 	struct qed_ll2_info *p_ll2_conn = p_cookie;
359 	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
360 	u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
361 	struct qed_ll2_tx_packet *p_pkt;
362 	bool b_last_frag = false;
363 	unsigned long flags;
364 	dma_addr_t tx_frag;
365 	int rc = -EINVAL;
366 
367 	spin_lock_irqsave(&p_tx->lock, flags);
368 	if (p_tx->b_completing_packet) {
369 		rc = -EBUSY;
370 		goto out;
371 	}
372 
373 	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
374 	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
375 	while (num_bds) {
376 		if (list_empty(&p_tx->active_descq))
377 			goto out;
378 
379 		p_pkt = list_first_entry(&p_tx->active_descq,
380 					 struct qed_ll2_tx_packet, list_entry);
381 		if (!p_pkt)
382 			goto out;
383 
384 		p_tx->b_completing_packet = true;
385 		p_tx->cur_completing_packet = *p_pkt;
386 		num_bds_in_packet = p_pkt->bd_used;
387 		list_del(&p_pkt->list_entry);
388 
389 		if (num_bds < num_bds_in_packet) {
390 			DP_NOTICE(p_hwfn,
391 				  "Rest of BDs does not cover whole packet\n");
392 			goto out;
393 		}
394 
395 		num_bds -= num_bds_in_packet;
396 		p_tx->bds_idx += num_bds_in_packet;
397 		while (num_bds_in_packet--)
398 			qed_chain_consume(&p_tx->txq_chain);
399 
400 		p_tx->cur_completing_bd_idx = 1;
401 		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
402 		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
403 
404 		spin_unlock_irqrestore(&p_tx->lock, flags);
405 		tx_frag = p_pkt->bds_set[0].tx_frag;
406 		if (p_ll2_conn->conn.gsi_enable)
407 			qed_ll2b_complete_tx_gsi_packet(p_hwfn,
408 							p_ll2_conn->my_id,
409 							p_pkt->cookie,
410 							tx_frag,
411 							b_last_frag, !num_bds);
412 		else
413 			qed_ll2b_complete_tx_packet(p_hwfn,
414 						    p_ll2_conn->my_id,
415 						    p_pkt->cookie,
416 						    tx_frag,
417 						    b_last_frag, !num_bds);
418 		spin_lock_irqsave(&p_tx->lock, flags);
419 	}
420 
421 	p_tx->b_completing_packet = false;
422 	rc = 0;
423 out:
424 	spin_unlock_irqrestore(&p_tx->lock, flags);
425 	return rc;
426 }
427 
428 static int
429 qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
430 			   struct qed_ll2_info *p_ll2_info,
431 			   union core_rx_cqe_union *p_cqe,
432 			   unsigned long lock_flags, bool b_last_cqe)
433 {
434 	struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
435 	struct qed_ll2_rx_packet *p_pkt = NULL;
436 	u16 packet_length, parse_flags, vlan;
437 	u32 src_mac_addrhi;
438 	u16 src_mac_addrlo;
439 
440 	if (!list_empty(&p_rx->active_descq))
441 		p_pkt = list_first_entry(&p_rx->active_descq,
442 					 struct qed_ll2_rx_packet, list_entry);
443 	if (!p_pkt) {
444 		DP_NOTICE(p_hwfn,
445 			  "GSI Rx completion but active_descq is empty\n");
446 		return -EIO;
447 	}
448 
449 	list_del(&p_pkt->list_entry);
450 	parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
451 	packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
452 	vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
453 	src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
454 	src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
455 	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
456 		DP_NOTICE(p_hwfn,
457 			  "Mismatch between active_descq and the LL2 Rx chain\n");
458 	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
459 
460 	spin_unlock_irqrestore(&p_rx->lock, lock_flags);
461 	qed_ll2b_complete_rx_gsi_packet(p_hwfn,
462 					p_ll2_info->my_id,
463 					p_pkt->cookie,
464 					p_pkt->rx_buf_addr,
465 					packet_length,
466 					p_cqe->rx_cqe_gsi.data_length_error,
467 					parse_flags,
468 					vlan,
469 					src_mac_addrhi,
470 					src_mac_addrlo, b_last_cqe);
471 	spin_lock_irqsave(&p_rx->lock, lock_flags);
472 
473 	return 0;
474 }
475 
476 static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
477 				      struct qed_ll2_info *p_ll2_conn,
478 				      union core_rx_cqe_union *p_cqe,
479 				      unsigned long *p_lock_flags,
480 				      bool b_last_cqe)
481 {
482 	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
483 	struct qed_ll2_rx_packet *p_pkt = NULL;
484 
485 	if (!list_empty(&p_rx->active_descq))
486 		p_pkt = list_first_entry(&p_rx->active_descq,
487 					 struct qed_ll2_rx_packet, list_entry);
488 	if (!p_pkt) {
489 		DP_NOTICE(p_hwfn,
490 			  "LL2 Rx completion but active_descq is empty\n");
491 		return -EIO;
492 	}
493 	list_del(&p_pkt->list_entry);
494 
495 	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
496 		DP_NOTICE(p_hwfn,
497 			  "Mismatch between active_descq and the LL2 Rx chain\n");
498 	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
499 
500 	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
501 	qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
502 				    p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
503 	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
504 
505 	return 0;
506 }
507 
508 static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
509 {
510 	struct qed_ll2_info *p_ll2_conn = cookie;
511 	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
512 	union core_rx_cqe_union *cqe = NULL;
513 	u16 cq_new_idx = 0, cq_old_idx = 0;
514 	unsigned long flags = 0;
515 	int rc = 0;
516 
517 	spin_lock_irqsave(&p_rx->lock, flags);
518 	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
519 	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
520 
521 	while (cq_new_idx != cq_old_idx) {
522 		bool b_last_cqe = (cq_new_idx == cq_old_idx);
523 
524 		cqe = qed_chain_consume(&p_rx->rcq_chain);
525 		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
526 
527 		DP_VERBOSE(p_hwfn,
528 			   QED_MSG_LL2,
529 			   "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
530 			   cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);
531 
532 		switch (cqe->rx_cqe_sp.type) {
533 		case CORE_RX_CQE_TYPE_SLOW_PATH:
534 			DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
535 			rc = -EINVAL;
536 			break;
537 		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
538 			rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
539 							cqe, flags, b_last_cqe);
540 			break;
541 		case CORE_RX_CQE_TYPE_REGULAR:
542 			rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
543 							cqe, &flags,
544 							b_last_cqe);
545 			break;
546 		default:
547 			rc = -EIO;
548 		}
549 	}
550 
551 	spin_unlock_irqrestore(&p_rx->lock, flags);
552 	return rc;
553 }
554 
555 static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
556 {
557 	struct qed_ll2_info *p_ll2_conn = NULL;
558 	struct qed_ll2_rx_packet *p_pkt = NULL;
559 	struct qed_ll2_rx_queue *p_rx;
560 
561 	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
562 	if (!p_ll2_conn)
563 		return;
564 
565 	p_rx = &p_ll2_conn->rx_queue;
566 
567 	while (!list_empty(&p_rx->active_descq)) {
568 		dma_addr_t rx_buf_addr;
569 		void *cookie;
570 		bool b_last;
571 
572 		p_pkt = list_first_entry(&p_rx->active_descq,
573 					 struct qed_ll2_rx_packet, list_entry);
574 		if (!p_pkt)
575 			break;
576 
577 		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
578 
579 		if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
580 			struct qed_ooo_buffer *p_buffer;
581 
582 			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
583 			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
584 						p_buffer);
585 		} else {
586 			rx_buf_addr = p_pkt->rx_buf_addr;
587 			cookie = p_pkt->cookie;
588 
589 			b_last = list_empty(&p_rx->active_descq);
590 		}
591 	}
592 }
593 
594 #if IS_ENABLED(CONFIG_QED_ISCSI)
595 static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
596 {
597 	u8 bd_flags = 0;
598 
599 	if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
600 		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1);
601 
602 	return bd_flags;
603 }
604 
605 static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
606 				  struct qed_ll2_info *p_ll2_conn)
607 {
608 	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
609 	u16 packet_length = 0, parse_flags = 0, vlan = 0;
610 	struct qed_ll2_rx_packet *p_pkt = NULL;
611 	u32 num_ooo_add_to_peninsula = 0, cid;
612 	union core_rx_cqe_union *cqe = NULL;
613 	u16 cq_new_idx = 0, cq_old_idx = 0;
614 	struct qed_ooo_buffer *p_buffer;
615 	struct ooo_opaque *iscsi_ooo;
616 	u8 placement_offset = 0;
617 	u8 cqe_type;
618 
619 	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
620 	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
621 	if (cq_new_idx == cq_old_idx)
622 		return 0;
623 
624 	while (cq_new_idx != cq_old_idx) {
625 		struct core_rx_fast_path_cqe *p_cqe_fp;
626 
627 		cqe = qed_chain_consume(&p_rx->rcq_chain);
628 		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
629 		cqe_type = cqe->rx_cqe_sp.type;
630 
631 		if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
632 			DP_NOTICE(p_hwfn,
633 				  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
634 				  cqe_type);
635 			return -EINVAL;
636 		}
637 		p_cqe_fp = &cqe->rx_cqe_fp;
638 
639 		placement_offset = p_cqe_fp->placement_offset;
640 		parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
641 		packet_length = le16_to_cpu(p_cqe_fp->packet_length);
642 		vlan = le16_to_cpu(p_cqe_fp->vlan);
643 		iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
644 		qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
645 					   iscsi_ooo);
646 		cid = le32_to_cpu(iscsi_ooo->cid);
647 
648 		/* Process delete isle first */
649 		if (iscsi_ooo->drop_size)
650 			qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
651 					     iscsi_ooo->drop_isle,
652 					     iscsi_ooo->drop_size);
653 
654 		if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
655 			continue;
656 
657 		/* Now process create/add/join isles */
658 		if (list_empty(&p_rx->active_descq)) {
659 			DP_NOTICE(p_hwfn,
660 				  "LL2 OOO RX chain has no submitted buffers\n"
661 				  );
662 			return -EIO;
663 		}
664 
665 		p_pkt = list_first_entry(&p_rx->active_descq,
666 					 struct qed_ll2_rx_packet, list_entry);
667 
668 		if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
669 		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
670 		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
671 		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
672 		    (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
673 			if (!p_pkt) {
674 				DP_NOTICE(p_hwfn,
675 					  "LL2 OOO RX packet is not valid\n");
676 				return -EIO;
677 			}
678 			list_del(&p_pkt->list_entry);
679 			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
680 			p_buffer->packet_length = packet_length;
681 			p_buffer->parse_flags = parse_flags;
682 			p_buffer->vlan = vlan;
683 			p_buffer->placement_offset = placement_offset;
684 			qed_chain_consume(&p_rx->rxq_chain);
685 			list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
686 
687 			switch (iscsi_ooo->ooo_opcode) {
688 			case TCP_EVENT_ADD_NEW_ISLE:
689 				qed_ooo_add_new_isle(p_hwfn,
690 						     p_hwfn->p_ooo_info,
691 						     cid,
692 						     iscsi_ooo->ooo_isle,
693 						     p_buffer);
694 				break;
695 			case TCP_EVENT_ADD_ISLE_RIGHT:
696 				qed_ooo_add_new_buffer(p_hwfn,
697 						       p_hwfn->p_ooo_info,
698 						       cid,
699 						       iscsi_ooo->ooo_isle,
700 						       p_buffer,
701 						       QED_OOO_RIGHT_BUF);
702 				break;
703 			case TCP_EVENT_ADD_ISLE_LEFT:
704 				qed_ooo_add_new_buffer(p_hwfn,
705 						       p_hwfn->p_ooo_info,
706 						       cid,
707 						       iscsi_ooo->ooo_isle,
708 						       p_buffer,
709 						       QED_OOO_LEFT_BUF);
710 				break;
711 			case TCP_EVENT_JOIN:
712 				qed_ooo_add_new_buffer(p_hwfn,
713 						       p_hwfn->p_ooo_info,
714 						       cid,
715 						       iscsi_ooo->ooo_isle +
716 						       1,
717 						       p_buffer,
718 						       QED_OOO_LEFT_BUF);
719 				qed_ooo_join_isles(p_hwfn,
720 						   p_hwfn->p_ooo_info,
721 						   cid, iscsi_ooo->ooo_isle);
722 				break;
723 			case TCP_EVENT_ADD_PEN:
724 				num_ooo_add_to_peninsula++;
725 				qed_ooo_put_ready_buffer(p_hwfn,
726 							 p_hwfn->p_ooo_info,
727 							 p_buffer, true);
728 				break;
729 			}
730 		} else {
731 			DP_NOTICE(p_hwfn,
732 				  "Unexpected event (%d) TX OOO completion\n",
733 				  iscsi_ooo->ooo_opcode);
734 		}
735 	}
736 
737 	return 0;
738 }
739 
740 static void
741 qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
742 			  struct qed_ll2_info *p_ll2_conn)
743 {
744 	struct qed_ooo_buffer *p_buffer;
745 	int rc;
746 	u16 l4_hdr_offset_w;
747 	dma_addr_t first_frag;
748 	u16 parse_flags;
749 	u8 bd_flags;
750 
751 	/* Submit Tx buffers here */
752 	while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
753 						    p_hwfn->p_ooo_info))) {
754 		l4_hdr_offset_w = 0;
755 		bd_flags = 0;
756 
757 		first_frag = p_buffer->rx_buffer_phys_addr +
758 			     p_buffer->placement_offset;
759 		parse_flags = p_buffer->parse_flags;
760 		bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
761 		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1);
762 		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1);
763 
764 		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
765 					       p_buffer->vlan, bd_flags,
766 					       l4_hdr_offset_w,
767 					       p_ll2_conn->conn.tx_dest, 0,
768 					       first_frag,
769 					       p_buffer->packet_length,
770 					       p_buffer, true);
771 		if (rc) {
772 			qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
773 						 p_buffer, false);
774 			break;
775 		}
776 	}
777 }
778 
779 static void
780 qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
781 			  struct qed_ll2_info *p_ll2_conn)
782 {
783 	struct qed_ooo_buffer *p_buffer;
784 	int rc;
785 
786 	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
787 						   p_hwfn->p_ooo_info))) {
788 		rc = qed_ll2_post_rx_buffer(p_hwfn,
789 					    p_ll2_conn->my_id,
790 					    p_buffer->rx_buffer_phys_addr,
791 					    0, p_buffer, true);
792 		if (rc) {
793 			qed_ooo_put_free_buffer(p_hwfn,
794 						p_hwfn->p_ooo_info, p_buffer);
795 			break;
796 		}
797 	}
798 }
799 
800 static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
801 {
802 	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
803 	int rc;
804 
805 	rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
806 	if (rc)
807 		return rc;
808 
809 	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
810 	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
811 
812 	return 0;
813 }
814 
815 static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
816 {
817 	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
818 	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
819 	struct qed_ll2_tx_packet *p_pkt = NULL;
820 	struct qed_ooo_buffer *p_buffer;
821 	bool b_dont_submit_rx = false;
822 	u16 new_idx = 0, num_bds = 0;
823 	int rc;
824 
825 	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
826 	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
827 
828 	if (!num_bds)
829 		return 0;
830 
831 	while (num_bds) {
832 		if (list_empty(&p_tx->active_descq))
833 			return -EINVAL;
834 
835 		p_pkt = list_first_entry(&p_tx->active_descq,
836 					 struct qed_ll2_tx_packet, list_entry);
837 		if (!p_pkt)
838 			return -EINVAL;
839 
840 		if (p_pkt->bd_used != 1) {
841 			DP_NOTICE(p_hwfn,
842 				  "Unexpectedly many BDs(%d) in TX OOO completion\n",
843 				  p_pkt->bd_used);
844 			return -EINVAL;
845 		}
846 
847 		list_del(&p_pkt->list_entry);
848 
849 		num_bds--;
850 		p_tx->bds_idx++;
851 		qed_chain_consume(&p_tx->txq_chain);
852 
853 		p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
854 		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
855 
856 		if (b_dont_submit_rx) {
857 			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
858 						p_buffer);
859 			continue;
860 		}
861 
862 		rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
863 					    p_buffer->rx_buffer_phys_addr, 0,
864 					    p_buffer, true);
865 		if (rc != 0) {
866 			qed_ooo_put_free_buffer(p_hwfn,
867 						p_hwfn->p_ooo_info, p_buffer);
868 			b_dont_submit_rx = true;
869 		}
870 	}
871 
872 	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
873 
874 	return 0;
875 }
876 
877 static int
878 qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
879 			       struct qed_ll2_info *p_ll2_info,
880 			       u16 rx_num_ooo_buffers, u16 mtu)
881 {
882 	struct qed_ooo_buffer *p_buf = NULL;
883 	void *p_virt;
884 	u16 buf_idx;
885 	int rc = 0;
886 
887 	if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
888 		return rc;
889 
890 	if (!rx_num_ooo_buffers)
891 		return -EINVAL;
892 
893 	for (buf_idx = 0; buf_idx < rx_num_ooo_buffers; buf_idx++) {
894 		p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
895 		if (!p_buf) {
896 			rc = -ENOMEM;
897 			goto out;
898 		}
899 
900 		p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
901 		p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
902 					 ETH_CACHE_LINE_SIZE - 1) &
903 					~(ETH_CACHE_LINE_SIZE - 1);
904 		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
905 					    p_buf->rx_buffer_size,
906 					    &p_buf->rx_buffer_phys_addr,
907 					    GFP_KERNEL);
908 		if (!p_virt) {
909 			kfree(p_buf);
910 			rc = -ENOMEM;
911 			goto out;
912 		}
913 
914 		p_buf->rx_buffer_virt_addr = p_virt;
915 		qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
916 	}
917 
918 	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
919 		   "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
920 		   rx_num_ooo_buffers, p_buf->rx_buffer_size);
921 
922 out:
923 	return rc;
924 }
925 
926 static void
927 qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
928 				 struct qed_ll2_info *p_ll2_conn)
929 {
930 	if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
931 		return;
932 
933 	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
934 	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
935 }
936 
937 static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
938 					   struct qed_ll2_info *p_ll2_conn)
939 {
940 	struct qed_ooo_buffer *p_buffer;
941 
942 	if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
943 		return;
944 
945 	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
946 	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
947 						   p_hwfn->p_ooo_info))) {
948 		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
949 				  p_buffer->rx_buffer_size,
950 				  p_buffer->rx_buffer_virt_addr,
951 				  p_buffer->rx_buffer_phys_addr);
952 		kfree(p_buffer);
953 	}
954 }
955 
956 static void qed_ll2_stop_ooo(struct qed_dev *cdev)
957 {
958 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
959 	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
960 
961 	DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
962 		   *handle);
963 
964 	qed_ll2_terminate_connection(hwfn, *handle);
965 	qed_ll2_release_connection(hwfn, *handle);
966 	*handle = QED_LL2_UNUSED_HANDLE;
967 }
968 
969 static int qed_ll2_start_ooo(struct qed_dev *cdev,
970 			     struct qed_ll2_params *params)
971 {
972 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
973 	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
974 	struct qed_ll2_conn ll2_info = { 0 };
975 	int rc;
976 
977 	ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
978 	ll2_info.mtu = params->mtu;
979 	ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
980 	ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
981 	ll2_info.tx_tc = OOO_LB_TC;
982 	ll2_info.tx_dest = CORE_TX_DEST_LB;
983 
984 	rc = qed_ll2_acquire_connection(hwfn, &ll2_info,
985 					QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
986 					handle);
987 	if (rc) {
988 		DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
989 		goto out;
990 	}
991 
992 	rc = qed_ll2_establish_connection(hwfn, *handle);
993 	if (rc) {
994 		DP_INFO(cdev, "Failed to establist LL2 OOO connection\n");
995 		goto fail;
996 	}
997 
998 	return 0;
999 
1000 fail:
1001 	qed_ll2_release_connection(hwfn, *handle);
1002 out:
1003 	*handle = QED_LL2_UNUSED_HANDLE;
1004 	return rc;
1005 }
1006 #else /* IS_ENABLED(CONFIG_QED_ISCSI) */
1007 static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn,
1008 				     void *p_cookie) { return -EINVAL; }
1009 static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn,
1010 				     void *p_cookie) { return -EINVAL; }
1011 static inline int
1012 qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
1013 			       struct qed_ll2_info *p_ll2_info,
1014 			       u16 rx_num_ooo_buffers, u16 mtu) { return 0; }
1015 static inline void
1016 qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
1017 				 struct qed_ll2_info *p_ll2_conn) { return; }
1018 static inline void
1019 qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
1020 			       struct qed_ll2_info *p_ll2_conn) { return; }
1021 static inline void qed_ll2_stop_ooo(struct qed_dev *cdev) { return; }
1022 static inline int qed_ll2_start_ooo(struct qed_dev *cdev,
1023 				    struct qed_ll2_params *params)
1024 				    { return -EINVAL; }
1025 #endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
1026 
1027 static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
1028 				     struct qed_ll2_info *p_ll2_conn,
1029 				     u8 action_on_error)
1030 {
1031 	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
1032 	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
1033 	struct core_rx_start_ramrod_data *p_ramrod = NULL;
1034 	struct qed_spq_entry *p_ent = NULL;
1035 	struct qed_sp_init_data init_data;
1036 	u16 cqe_pbl_size;
1037 	int rc = 0;
1038 
1039 	/* Get SPQ entry */
1040 	memset(&init_data, 0, sizeof(init_data));
1041 	init_data.cid = p_ll2_conn->cid;
1042 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1043 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1044 
1045 	rc = qed_sp_init_request(p_hwfn, &p_ent,
1046 				 CORE_RAMROD_RX_QUEUE_START,
1047 				 PROTOCOLID_CORE, &init_data);
1048 	if (rc)
1049 		return rc;
1050 
1051 	p_ramrod = &p_ent->ramrod.core_rx_queue_start;
1052 
1053 	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
1054 	p_ramrod->sb_index = p_rx->rx_sb_index;
1055 	p_ramrod->complete_event_flg = 1;
1056 
1057 	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
1058 	DMA_REGPAIR_LE(p_ramrod->bd_base,
1059 		       p_rx->rxq_chain.p_phys_addr);
1060 	cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
1061 	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
1062 	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
1063 		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));
1064 
1065 	p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg;
1066 	p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en;
1067 	p_ramrod->queue_id = p_ll2_conn->queue_id;
1068 	p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
1069 									  : 1;
1070 
1071 	if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
1072 	    p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
1073 		p_ramrod->mf_si_bcast_accept_all = 1;
1074 		p_ramrod->mf_si_mcast_accept_all = 1;
1075 	} else {
1076 		p_ramrod->mf_si_bcast_accept_all = 0;
1077 		p_ramrod->mf_si_mcast_accept_all = 0;
1078 	}
1079 
1080 	p_ramrod->action_on_error.error_type = action_on_error;
1081 	p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
1082 	return qed_spq_post(p_hwfn, p_ent, NULL);
1083 }
1084 
1085 static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
1086 				     struct qed_ll2_info *p_ll2_conn)
1087 {
1088 	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
1089 	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
1090 	struct core_tx_start_ramrod_data *p_ramrod = NULL;
1091 	struct qed_spq_entry *p_ent = NULL;
1092 	struct qed_sp_init_data init_data;
1093 	union qed_qm_pq_params pq_params;
1094 	u16 pq_id = 0, pbl_size;
1095 	int rc = -EINVAL;
1096 
1097 	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
1098 		return 0;
1099 
1100 	if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
1101 		p_ll2_conn->tx_stats_en = 0;
1102 	else
1103 		p_ll2_conn->tx_stats_en = 1;
1104 
1105 	/* Get SPQ entry */
1106 	memset(&init_data, 0, sizeof(init_data));
1107 	init_data.cid = p_ll2_conn->cid;
1108 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1109 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1110 
1111 	rc = qed_sp_init_request(p_hwfn, &p_ent,
1112 				 CORE_RAMROD_TX_QUEUE_START,
1113 				 PROTOCOLID_CORE, &init_data);
1114 	if (rc)
1115 		return rc;
1116 
1117 	p_ramrod = &p_ent->ramrod.core_tx_queue_start;
1118 
1119 	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
1120 	p_ramrod->sb_index = p_tx->tx_sb_index;
1121 	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
1122 	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
1123 	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
1124 
1125 	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
1126 		       qed_chain_get_pbl_phys(&p_tx->txq_chain));
1127 	pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
1128 	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
1129 
1130 	memset(&pq_params, 0, sizeof(pq_params));
1131 	pq_params.core.tc = p_ll2_conn->conn.tx_tc;
1132 	pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
1133 	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
1134 
1135 	switch (conn_type) {
1136 	case QED_LL2_TYPE_FCOE:
1137 		p_ramrod->conn_type = PROTOCOLID_FCOE;
1138 		break;
1139 	case QED_LL2_TYPE_ISCSI:
1140 	case QED_LL2_TYPE_ISCSI_OOO:
1141 		p_ramrod->conn_type = PROTOCOLID_ISCSI;
1142 		break;
1143 	case QED_LL2_TYPE_ROCE:
1144 		p_ramrod->conn_type = PROTOCOLID_ROCE;
1145 		break;
1146 	default:
1147 		p_ramrod->conn_type = PROTOCOLID_ETH;
1148 		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
1149 	}
1150 
1151 	p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
1152 	return qed_spq_post(p_hwfn, p_ent, NULL);
1153 }
1154 
1155 static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
1156 				    struct qed_ll2_info *p_ll2_conn)
1157 {
1158 	struct core_rx_stop_ramrod_data *p_ramrod = NULL;
1159 	struct qed_spq_entry *p_ent = NULL;
1160 	struct qed_sp_init_data init_data;
1161 	int rc = -EINVAL;
1162 
1163 	/* Get SPQ entry */
1164 	memset(&init_data, 0, sizeof(init_data));
1165 	init_data.cid = p_ll2_conn->cid;
1166 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1167 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1168 
1169 	rc = qed_sp_init_request(p_hwfn, &p_ent,
1170 				 CORE_RAMROD_RX_QUEUE_STOP,
1171 				 PROTOCOLID_CORE, &init_data);
1172 	if (rc)
1173 		return rc;
1174 
1175 	p_ramrod = &p_ent->ramrod.core_rx_queue_stop;
1176 
1177 	p_ramrod->complete_event_flg = 1;
1178 	p_ramrod->queue_id = p_ll2_conn->queue_id;
1179 
1180 	return qed_spq_post(p_hwfn, p_ent, NULL);
1181 }
1182 
1183 static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
1184 				    struct qed_ll2_info *p_ll2_conn)
1185 {
1186 	struct qed_spq_entry *p_ent = NULL;
1187 	struct qed_sp_init_data init_data;
1188 	int rc = -EINVAL;
1189 
1190 	/* Get SPQ entry */
1191 	memset(&init_data, 0, sizeof(init_data));
1192 	init_data.cid = p_ll2_conn->cid;
1193 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1194 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1195 
1196 	rc = qed_sp_init_request(p_hwfn, &p_ent,
1197 				 CORE_RAMROD_TX_QUEUE_STOP,
1198 				 PROTOCOLID_CORE, &init_data);
1199 	if (rc)
1200 		return rc;
1201 
1202 	return qed_spq_post(p_hwfn, p_ent, NULL);
1203 }
1204 
1205 static int
1206 qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
1207 			      struct qed_ll2_info *p_ll2_info, u16 rx_num_desc)
1208 {
1209 	struct qed_ll2_rx_packet *p_descq;
1210 	u32 capacity;
1211 	int rc = 0;
1212 
1213 	if (!rx_num_desc)
1214 		goto out;
1215 
1216 	rc = qed_chain_alloc(p_hwfn->cdev,
1217 			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1218 			     QED_CHAIN_MODE_NEXT_PTR,
1219 			     QED_CHAIN_CNT_TYPE_U16,
1220 			     rx_num_desc,
1221 			     sizeof(struct core_rx_bd),
1222 			     &p_ll2_info->rx_queue.rxq_chain);
1223 	if (rc) {
1224 		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
1225 		goto out;
1226 	}
1227 
1228 	capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
1229 	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
1230 			  GFP_KERNEL);
1231 	if (!p_descq) {
1232 		rc = -ENOMEM;
1233 		DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
1234 		goto out;
1235 	}
1236 	p_ll2_info->rx_queue.descq_array = p_descq;
1237 
1238 	rc = qed_chain_alloc(p_hwfn->cdev,
1239 			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1240 			     QED_CHAIN_MODE_PBL,
1241 			     QED_CHAIN_CNT_TYPE_U16,
1242 			     rx_num_desc,
1243 			     sizeof(struct core_rx_fast_path_cqe),
1244 			     &p_ll2_info->rx_queue.rcq_chain);
1245 	if (rc) {
1246 		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
1247 		goto out;
1248 	}
1249 
1250 	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
1251 		   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
1252 		   p_ll2_info->conn.conn_type, rx_num_desc);
1253 
1254 out:
1255 	return rc;
1256 }
1257 
1258 static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
1259 					 struct qed_ll2_info *p_ll2_info,
1260 					 u16 tx_num_desc)
1261 {
1262 	struct qed_ll2_tx_packet *p_descq;
1263 	u32 capacity;
1264 	int rc = 0;
1265 
1266 	if (!tx_num_desc)
1267 		goto out;
1268 
1269 	rc = qed_chain_alloc(p_hwfn->cdev,
1270 			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1271 			     QED_CHAIN_MODE_PBL,
1272 			     QED_CHAIN_CNT_TYPE_U16,
1273 			     tx_num_desc,
1274 			     sizeof(struct core_tx_bd),
1275 			     &p_ll2_info->tx_queue.txq_chain);
1276 	if (rc)
1277 		goto out;
1278 
1279 	capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
1280 	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
1281 			  GFP_KERNEL);
1282 	if (!p_descq) {
1283 		rc = -ENOMEM;
1284 		goto out;
1285 	}
1286 	p_ll2_info->tx_queue.descq_array = p_descq;
1287 
1288 	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
1289 		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
1290 		   p_ll2_info->conn.conn_type, tx_num_desc);
1291 
1292 out:
1293 	if (rc)
1294 		DP_NOTICE(p_hwfn,
1295 			  "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
1296 			  tx_num_desc);
1297 	return rc;
1298 }
1299 
1300 int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
1301 			       struct qed_ll2_conn *p_params,
1302 			       u16 rx_num_desc,
1303 			       u16 tx_num_desc,
1304 			       u8 *p_connection_handle)
1305 {
1306 	qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
1307 	struct qed_ll2_info *p_ll2_info = NULL;
1308 	int rc;
1309 	u8 i;
1310 
1311 	if (!p_connection_handle || !p_hwfn->p_ll2_info)
1312 		return -EINVAL;
1313 
1314 	/* Find a free connection to be used */
1315 	for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
1316 		mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
1317 		if (p_hwfn->p_ll2_info[i].b_active) {
1318 			mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
1319 			continue;
1320 		}
1321 
1322 		p_hwfn->p_ll2_info[i].b_active = true;
1323 		p_ll2_info = &p_hwfn->p_ll2_info[i];
1324 		mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
1325 		break;
1326 	}
1327 	if (!p_ll2_info)
1328 		return -EBUSY;
1329 
1330 	p_ll2_info->conn = *p_params;
1331 
1332 	rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
1333 	if (rc)
1334 		goto q_allocate_fail;
1335 
1336 	rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc);
1337 	if (rc)
1338 		goto q_allocate_fail;
1339 
1340 	rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
1341 					    rx_num_desc * 2, p_params->mtu);
1342 	if (rc)
1343 		goto q_allocate_fail;
1344 
1345 	/* Register callbacks for the Rx/Tx queues */
1346 	if (p_params->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
1347 		comp_rx_cb = qed_ll2_lb_rxq_completion;
1348 		comp_tx_cb = qed_ll2_lb_txq_completion;
1349 	} else {
1350 		comp_rx_cb = qed_ll2_rxq_completion;
1351 		comp_tx_cb = qed_ll2_txq_completion;
1352 	}
1353 
1354 	if (rx_num_desc) {
1355 		qed_int_register_cb(p_hwfn, comp_rx_cb,
1356 				    &p_hwfn->p_ll2_info[i],
1357 				    &p_ll2_info->rx_queue.rx_sb_index,
1358 				    &p_ll2_info->rx_queue.p_fw_cons);
1359 		p_ll2_info->rx_queue.b_cb_registred = true;
1360 	}
1361 
1362 	if (tx_num_desc) {
1363 		qed_int_register_cb(p_hwfn,
1364 				    comp_tx_cb,
1365 				    &p_hwfn->p_ll2_info[i],
1366 				    &p_ll2_info->tx_queue.tx_sb_index,
1367 				    &p_ll2_info->tx_queue.p_fw_cons);
1368 		p_ll2_info->tx_queue.b_cb_registred = true;
1369 	}
1370 
1371 	*p_connection_handle = i;
1372 	return rc;
1373 
1374 q_allocate_fail:
1375 	qed_ll2_release_connection(p_hwfn, i);
1376 	return -ENOMEM;
1377 }
1378 
1379 static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
1380 					   struct qed_ll2_info *p_ll2_conn)
1381 {
1382 	u8 action_on_error = 0;
1383 
1384 	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
1385 		return 0;
1386 
1387 	DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
1388 
1389 	SET_FIELD(action_on_error,
1390 		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
1391 		  p_ll2_conn->conn.ai_err_packet_too_big);
1392 	SET_FIELD(action_on_error,
1393 		  CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->conn.ai_err_no_buf);
1394 
1395 	return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
1396 }
1397 
1398 int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
1399 {
1400 	struct qed_ll2_info *p_ll2_conn;
1401 	struct qed_ll2_rx_queue *p_rx;
1402 	struct qed_ll2_tx_queue *p_tx;
1403 	int rc = -EINVAL;
1404 	u32 i, capacity;
1405 	u8 qid;
1406 
1407 	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
1408 	if (!p_ll2_conn)
1409 		return -EINVAL;
1410 	p_rx = &p_ll2_conn->rx_queue;
1411 	p_tx = &p_ll2_conn->tx_queue;
1412 
1413 	qed_chain_reset(&p_rx->rxq_chain);
1414 	qed_chain_reset(&p_rx->rcq_chain);
1415 	INIT_LIST_HEAD(&p_rx->active_descq);
1416 	INIT_LIST_HEAD(&p_rx->free_descq);
1417 	INIT_LIST_HEAD(&p_rx->posting_descq);
1418 	spin_lock_init(&p_rx->lock);
1419 	capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
1420 	for (i = 0; i < capacity; i++)
1421 		list_add_tail(&p_rx->descq_array[i].list_entry,
1422 			      &p_rx->free_descq);
1423 	*p_rx->p_fw_cons = 0;
1424 
1425 	qed_chain_reset(&p_tx->txq_chain);
1426 	INIT_LIST_HEAD(&p_tx->active_descq);
1427 	INIT_LIST_HEAD(&p_tx->free_descq);
1428 	INIT_LIST_HEAD(&p_tx->sending_descq);
1429 	spin_lock_init(&p_tx->lock);
1430 	capacity = qed_chain_get_capacity(&p_tx->txq_chain);
1431 	for (i = 0; i < capacity; i++)
1432 		list_add_tail(&p_tx->descq_array[i].list_entry,
1433 			      &p_tx->free_descq);
1434 	p_tx->cur_completing_bd_idx = 0;
1435 	p_tx->bds_idx = 0;
1436 	p_tx->b_completing_packet = false;
1437 	p_tx->cur_send_packet = NULL;
1438 	p_tx->cur_send_frag_num = 0;
1439 	p_tx->cur_completing_frag_num = 0;
1440 	*p_tx->p_fw_cons = 0;
1441 
1442 	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
1443 
1444 	qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
1445 	p_ll2_conn->queue_id = qid;
1446 	p_ll2_conn->tx_stats_id = qid;
1447 	p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
1448 					    GTT_BAR0_MAP_REG_TSDM_RAM +
1449 					    TSTORM_LL2_RX_PRODS_OFFSET(qid);
1450 	p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
1451 					    qed_db_addr(p_ll2_conn->cid,
1452 							DQ_DEMS_LEGACY);
1453 
1454 	rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
1455 	if (rc)
1456 		return rc;
1457 
1458 	rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
1459 	if (rc)
1460 		return rc;
1461 
1462 	if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
1463 		qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);
1464 
1465 	qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
1466 
1467 	if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
1468 		qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
1469 					    0x8906, 0,
1470 					    QED_LLH_FILTER_ETHERTYPE);
1471 		qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
1472 					    0x8914, 0,
1473 					    QED_LLH_FILTER_ETHERTYPE);
1474 	}
1475 
1476 	return rc;
1477 }
1478 
1479 static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
1480 					     struct qed_ll2_rx_queue *p_rx,
1481 					     struct qed_ll2_rx_packet *p_curp)
1482 {
1483 	struct qed_ll2_rx_packet *p_posting_packet = NULL;
1484 	struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
1485 	bool b_notify_fw = false;
1486 	u16 bd_prod, cq_prod;
1487 
1488 	/* This handles the flushing of already posted buffers */
1489 	while (!list_empty(&p_rx->posting_descq)) {
1490 		p_posting_packet = list_first_entry(&p_rx->posting_descq,
1491 						    struct qed_ll2_rx_packet,
1492 						    list_entry);
1493 		list_move_tail(&p_posting_packet->list_entry,
1494 			       &p_rx->active_descq);
1495 		b_notify_fw = true;
1496 	}
1497 
1498 	/* This handles the supplied packet [if there is one] */
1499 	if (p_curp) {
1500 		list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
1501 		b_notify_fw = true;
1502 	}
1503 
1504 	if (!b_notify_fw)
1505 		return;
1506 
1507 	bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
1508 	cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
1509 	rx_prod.bd_prod = cpu_to_le16(bd_prod);
1510 	rx_prod.cqe_prod = cpu_to_le16(cq_prod);
1511 	DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
1512 }
1513 
1514 int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
1515 			   u8 connection_handle,
1516 			   dma_addr_t addr,
1517 			   u16 buf_len, void *cookie, u8 notify_fw)
1518 {
1519 	struct core_rx_bd_with_buff_len *p_curb = NULL;
1520 	struct qed_ll2_rx_packet *p_curp = NULL;
1521 	struct qed_ll2_info *p_ll2_conn;
1522 	struct qed_ll2_rx_queue *p_rx;
1523 	unsigned long flags;
1524 	void *p_data;
1525 	int rc = 0;
1526 
1527 	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1528 	if (!p_ll2_conn)
1529 		return -EINVAL;
1530 	p_rx = &p_ll2_conn->rx_queue;
1531 
1532 	spin_lock_irqsave(&p_rx->lock, flags);
1533 	if (!list_empty(&p_rx->free_descq))
1534 		p_curp = list_first_entry(&p_rx->free_descq,
1535 					  struct qed_ll2_rx_packet, list_entry);
1536 	if (p_curp) {
1537 		if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
1538 		    qed_chain_get_elem_left(&p_rx->rcq_chain)) {
1539 			p_data = qed_chain_produce(&p_rx->rxq_chain);
1540 			p_curb = (struct core_rx_bd_with_buff_len *)p_data;
1541 			qed_chain_produce(&p_rx->rcq_chain);
1542 		}
1543 	}
1544 
1545 	/* If we're lacking entires, let's try to flush buffers to FW */
1546 	if (!p_curp || !p_curb) {
1547 		rc = -EBUSY;
1548 		p_curp = NULL;
1549 		goto out_notify;
1550 	}
1551 
1552 	/* We have an Rx packet we can fill */
1553 	DMA_REGPAIR_LE(p_curb->addr, addr);
1554 	p_curb->buff_length = cpu_to_le16(buf_len);
1555 	p_curp->rx_buf_addr = addr;
1556 	p_curp->cookie = cookie;
1557 	p_curp->rxq_bd = p_curb;
1558 	p_curp->buf_length = buf_len;
1559 	list_del(&p_curp->list_entry);
1560 
1561 	/* Check if we only want to enqueue this packet without informing FW */
1562 	if (!notify_fw) {
1563 		list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
1564 		goto out;
1565 	}
1566 
1567 out_notify:
1568 	qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
1569 out:
1570 	spin_unlock_irqrestore(&p_rx->lock, flags);
1571 	return rc;
1572 }
1573 
1574 static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
1575 					  struct qed_ll2_tx_queue *p_tx,
1576 					  struct qed_ll2_tx_packet *p_curp,
1577 					  u8 num_of_bds,
1578 					  dma_addr_t first_frag,
1579 					  u16 first_frag_len, void *p_cookie,
1580 					  u8 notify_fw)
1581 {
1582 	list_del(&p_curp->list_entry);
1583 	p_curp->cookie = p_cookie;
1584 	p_curp->bd_used = num_of_bds;
1585 	p_curp->notify_fw = notify_fw;
1586 	p_tx->cur_send_packet = p_curp;
1587 	p_tx->cur_send_frag_num = 0;
1588 
1589 	p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag;
1590 	p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len;
1591 	p_tx->cur_send_frag_num++;
1592 }
1593 
1594 static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
1595 					     struct qed_ll2_info *p_ll2,
1596 					     struct qed_ll2_tx_packet *p_curp,
1597 					     u8 num_of_bds,
1598 					     enum core_tx_dest tx_dest,
1599 					     u16 vlan,
1600 					     u8 bd_flags,
1601 					     u16 l4_hdr_offset_w,
1602 					     enum core_roce_flavor_type type,
1603 					     dma_addr_t first_frag,
1604 					     u16 first_frag_len)
1605 {
1606 	struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
1607 	u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
1608 	struct core_tx_bd *start_bd = NULL;
1609 	u16 frag_idx;
1610 
1611 	start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
1612 	start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
1613 	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
1614 		  cpu_to_le16(l4_hdr_offset_w));
1615 	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
1616 	start_bd->bd_flags.as_bitfield = bd_flags;
1617 	start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
1618 	    CORE_TX_BD_FLAGS_START_BD_SHIFT;
1619 	SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
1620 	SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
1621 	DMA_REGPAIR_LE(start_bd->addr, first_frag);
1622 	start_bd->nbytes = cpu_to_le16(first_frag_len);
1623 
1624 	DP_VERBOSE(p_hwfn,
1625 		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1626 		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
1627 		   p_ll2->queue_id,
1628 		   p_ll2->cid,
1629 		   p_ll2->conn.conn_type,
1630 		   prod_idx,
1631 		   first_frag_len,
1632 		   num_of_bds,
1633 		   le32_to_cpu(start_bd->addr.hi),
1634 		   le32_to_cpu(start_bd->addr.lo));
1635 
1636 	if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds)
1637 		return;
1638 
1639 	/* Need to provide the packet with additional BDs for frags */
1640 	for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
1641 	     frag_idx < num_of_bds; frag_idx++) {
1642 		struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
1643 
1644 		*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
1645 		(*p_bd)->bd_flags.as_bitfield = 0;
1646 		(*p_bd)->bitfield1 = 0;
1647 		(*p_bd)->bitfield0 = 0;
1648 		p_curp->bds_set[frag_idx].tx_frag = 0;
1649 		p_curp->bds_set[frag_idx].frag_len = 0;
1650 	}
1651 }
1652 
1653 /* This should be called while the Txq spinlock is being held */
1654 static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
1655 				     struct qed_ll2_info *p_ll2_conn)
1656 {
1657 	bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
1658 	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
1659 	struct qed_ll2_tx_packet *p_pkt = NULL;
1660 	struct core_db_data db_msg = { 0, 0, 0 };
1661 	u16 bd_prod;
1662 
1663 	/* If there are missing BDs, don't do anything now */
1664 	if (p_ll2_conn->tx_queue.cur_send_frag_num !=
1665 	    p_ll2_conn->tx_queue.cur_send_packet->bd_used)
1666 		return;
1667 
1668 	/* Push the current packet to the list and clean after it */
1669 	list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
1670 		      &p_ll2_conn->tx_queue.sending_descq);
1671 	p_ll2_conn->tx_queue.cur_send_packet = NULL;
1672 	p_ll2_conn->tx_queue.cur_send_frag_num = 0;
1673 
1674 	/* Notify FW of packet only if requested to */
1675 	if (!b_notify)
1676 		return;
1677 
1678 	bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);
1679 
1680 	while (!list_empty(&p_tx->sending_descq)) {
1681 		p_pkt = list_first_entry(&p_tx->sending_descq,
1682 					 struct qed_ll2_tx_packet, list_entry);
1683 		if (!p_pkt)
1684 			break;
1685 
1686 		list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
1687 	}
1688 
1689 	SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
1690 	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
1691 	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
1692 		  DQ_XCM_CORE_TX_BD_PROD_CMD);
1693 	db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
1694 	db_msg.spq_prod = cpu_to_le16(bd_prod);
1695 
1696 	/* Make sure the BDs data is updated before ringing the doorbell */
1697 	wmb();
1698 
1699 	DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));
1700 
1701 	DP_VERBOSE(p_hwfn,
1702 		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1703 		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
1704 		   p_ll2_conn->queue_id,
1705 		   p_ll2_conn->cid, p_ll2_conn->conn.conn_type, db_msg.spq_prod);
1706 }
1707 
1708 int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
1709 			      u8 connection_handle,
1710 			      u8 num_of_bds,
1711 			      u16 vlan,
1712 			      u8 bd_flags,
1713 			      u16 l4_hdr_offset_w,
1714 			      enum qed_ll2_tx_dest e_tx_dest,
1715 			      enum qed_ll2_roce_flavor_type qed_roce_flavor,
1716 			      dma_addr_t first_frag,
1717 			      u16 first_frag_len, void *cookie, u8 notify_fw)
1718 {
1719 	struct qed_ll2_tx_packet *p_curp = NULL;
1720 	struct qed_ll2_info *p_ll2_conn = NULL;
1721 	enum core_roce_flavor_type roce_flavor;
1722 	struct qed_ll2_tx_queue *p_tx;
1723 	struct qed_chain *p_tx_chain;
1724 	enum core_tx_dest tx_dest;
1725 	unsigned long flags;
1726 	int rc = 0;
1727 
1728 	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1729 	if (!p_ll2_conn)
1730 		return -EINVAL;
1731 	p_tx = &p_ll2_conn->tx_queue;
1732 	p_tx_chain = &p_tx->txq_chain;
1733 
1734 	if (num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
1735 		return -EIO;
1736 
1737 	spin_lock_irqsave(&p_tx->lock, flags);
1738 	if (p_tx->cur_send_packet) {
1739 		rc = -EEXIST;
1740 		goto out;
1741 	}
1742 
1743 	/* Get entry, but only if we have tx elements for it */
1744 	if (!list_empty(&p_tx->free_descq))
1745 		p_curp = list_first_entry(&p_tx->free_descq,
1746 					  struct qed_ll2_tx_packet, list_entry);
1747 	if (p_curp && qed_chain_get_elem_left(p_tx_chain) < num_of_bds)
1748 		p_curp = NULL;
1749 
1750 	if (!p_curp) {
1751 		rc = -EBUSY;
1752 		goto out;
1753 	}
1754 
1755 	tx_dest = e_tx_dest == QED_LL2_TX_DEST_NW ? CORE_TX_DEST_NW :
1756 						    CORE_TX_DEST_LB;
1757 	if (qed_roce_flavor == QED_LL2_ROCE) {
1758 		roce_flavor = CORE_ROCE;
1759 	} else if (qed_roce_flavor == QED_LL2_RROCE) {
1760 		roce_flavor = CORE_RROCE;
1761 	} else {
1762 		rc = -EINVAL;
1763 		goto out;
1764 	}
1765 
1766 	/* Prepare packet and BD, and perhaps send a doorbell to FW */
1767 	qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp,
1768 				      num_of_bds, first_frag,
1769 				      first_frag_len, cookie, notify_fw);
1770 	qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
1771 					 num_of_bds, tx_dest,
1772 					 vlan, bd_flags, l4_hdr_offset_w,
1773 					 roce_flavor,
1774 					 first_frag, first_frag_len);
1775 
1776 	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
1777 
1778 out:
1779 	spin_unlock_irqrestore(&p_tx->lock, flags);
1780 	return rc;
1781 }
1782 
1783 int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
1784 				      u8 connection_handle,
1785 				      dma_addr_t addr, u16 nbytes)
1786 {
1787 	struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
1788 	struct qed_ll2_info *p_ll2_conn = NULL;
1789 	u16 cur_send_frag_num = 0;
1790 	struct core_tx_bd *p_bd;
1791 	unsigned long flags;
1792 
1793 	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1794 	if (!p_ll2_conn)
1795 		return -EINVAL;
1796 
1797 	if (!p_ll2_conn->tx_queue.cur_send_packet)
1798 		return -EINVAL;
1799 
1800 	p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
1801 	cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
1802 
1803 	if (cur_send_frag_num >= p_cur_send_packet->bd_used)
1804 		return -EINVAL;
1805 
1806 	/* Fill the BD information, and possibly notify FW */
1807 	p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
1808 	DMA_REGPAIR_LE(p_bd->addr, addr);
1809 	p_bd->nbytes = cpu_to_le16(nbytes);
1810 	p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
1811 	p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
1812 
1813 	p_ll2_conn->tx_queue.cur_send_frag_num++;
1814 
1815 	spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
1816 	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
1817 	spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);
1818 
1819 	return 0;
1820 }
1821 
1822 int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
1823 {
1824 	struct qed_ll2_info *p_ll2_conn = NULL;
1825 	int rc = -EINVAL;
1826 
1827 	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
1828 	if (!p_ll2_conn)
1829 		return -EINVAL;
1830 
1831 	/* Stop Tx & Rx of connection, if needed */
1832 	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1833 		rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
1834 		if (rc)
1835 			return rc;
1836 		qed_ll2_txq_flush(p_hwfn, connection_handle);
1837 	}
1838 
1839 	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1840 		rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
1841 		if (rc)
1842 			return rc;
1843 		qed_ll2_rxq_flush(p_hwfn, connection_handle);
1844 	}
1845 
1846 	if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
1847 		qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
1848 
1849 	if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
1850 		qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
1851 					       0x8906, 0,
1852 					       QED_LLH_FILTER_ETHERTYPE);
1853 		qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
1854 					       0x8914, 0,
1855 					       QED_LLH_FILTER_ETHERTYPE);
1856 	}
1857 
1858 	return rc;
1859 }
1860 
1861 void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
1862 {
1863 	struct qed_ll2_info *p_ll2_conn = NULL;
1864 
1865 	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1866 	if (!p_ll2_conn)
1867 		return;
1868 
1869 	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1870 		p_ll2_conn->rx_queue.b_cb_registred = false;
1871 		qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
1872 	}
1873 
1874 	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1875 		p_ll2_conn->tx_queue.b_cb_registred = false;
1876 		qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
1877 	}
1878 
1879 	kfree(p_ll2_conn->tx_queue.descq_array);
1880 	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
1881 
1882 	kfree(p_ll2_conn->rx_queue.descq_array);
1883 	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
1884 	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);
1885 
1886 	qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
1887 
1888 	qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);
1889 
1890 	mutex_lock(&p_ll2_conn->mutex);
1891 	p_ll2_conn->b_active = false;
1892 	mutex_unlock(&p_ll2_conn->mutex);
1893 }
1894 
1895 struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn)
1896 {
1897 	struct qed_ll2_info *p_ll2_connections;
1898 	u8 i;
1899 
1900 	/* Allocate LL2's set struct */
1901 	p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
1902 				    sizeof(struct qed_ll2_info), GFP_KERNEL);
1903 	if (!p_ll2_connections) {
1904 		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
1905 		return NULL;
1906 	}
1907 
1908 	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
1909 		p_ll2_connections[i].my_id = i;
1910 
1911 	return p_ll2_connections;
1912 }
1913 
1914 void qed_ll2_setup(struct qed_hwfn *p_hwfn,
1915 		   struct qed_ll2_info *p_ll2_connections)
1916 {
1917 	int i;
1918 
1919 	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
1920 		mutex_init(&p_ll2_connections[i].mutex);
1921 }
1922 
1923 void qed_ll2_free(struct qed_hwfn *p_hwfn,
1924 		  struct qed_ll2_info *p_ll2_connections)
1925 {
1926 	kfree(p_ll2_connections);
1927 }
1928 
1929 static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
1930 				struct qed_ptt *p_ptt,
1931 				struct qed_ll2_info *p_ll2_conn,
1932 				struct qed_ll2_stats *p_stats)
1933 {
1934 	struct core_ll2_tstorm_per_queue_stat tstats;
1935 	u8 qid = p_ll2_conn->queue_id;
1936 	u32 tstats_addr;
1937 
1938 	memset(&tstats, 0, sizeof(tstats));
1939 	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1940 		      CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
1941 	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
1942 
1943 	p_stats->packet_too_big_discard =
1944 			HILO_64_REGPAIR(tstats.packet_too_big_discard);
1945 	p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
1946 }
1947 
1948 static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
1949 				struct qed_ptt *p_ptt,
1950 				struct qed_ll2_info *p_ll2_conn,
1951 				struct qed_ll2_stats *p_stats)
1952 {
1953 	struct core_ll2_ustorm_per_queue_stat ustats;
1954 	u8 qid = p_ll2_conn->queue_id;
1955 	u32 ustats_addr;
1956 
1957 	memset(&ustats, 0, sizeof(ustats));
1958 	ustats_addr = BAR0_MAP_REG_USDM_RAM +
1959 		      CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
1960 	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
1961 
1962 	p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1963 	p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1964 	p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1965 	p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1966 	p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1967 	p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1968 }
1969 
1970 static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
1971 				struct qed_ptt *p_ptt,
1972 				struct qed_ll2_info *p_ll2_conn,
1973 				struct qed_ll2_stats *p_stats)
1974 {
1975 	struct core_ll2_pstorm_per_queue_stat pstats;
1976 	u8 stats_id = p_ll2_conn->tx_stats_id;
1977 	u32 pstats_addr;
1978 
1979 	memset(&pstats, 0, sizeof(pstats));
1980 	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
1981 		      CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
1982 	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
1983 
1984 	p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1985 	p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1986 	p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1987 	p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1988 	p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1989 	p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1990 }
1991 
1992 int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
1993 		      u8 connection_handle, struct qed_ll2_stats *p_stats)
1994 {
1995 	struct qed_ll2_info *p_ll2_conn = NULL;
1996 	struct qed_ptt *p_ptt;
1997 
1998 	memset(p_stats, 0, sizeof(*p_stats));
1999 
2000 	if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
2001 	    !p_hwfn->p_ll2_info)
2002 		return -EINVAL;
2003 
2004 	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
2005 
2006 	p_ptt = qed_ptt_acquire(p_hwfn);
2007 	if (!p_ptt) {
2008 		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
2009 		return -EINVAL;
2010 	}
2011 
2012 	_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
2013 	_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
2014 	if (p_ll2_conn->tx_stats_en)
2015 		_qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
2016 
2017 	qed_ptt_release(p_hwfn, p_ptt);
2018 	return 0;
2019 }
2020 
2021 static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
2022 				    const struct qed_ll2_cb_ops *ops,
2023 				    void *cookie)
2024 {
2025 	cdev->ll2->cbs = ops;
2026 	cdev->ll2->cb_cookie = cookie;
2027 }
2028 
2029 static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
2030 {
2031 	struct qed_ll2_conn ll2_info;
2032 	struct qed_ll2_buffer *buffer, *tmp_buffer;
2033 	enum qed_ll2_conn_type conn_type;
2034 	struct qed_ptt *p_ptt;
2035 	int rc, i;
2036 	u8 gsi_enable = 1;
2037 
2038 	/* Initialize LL2 locks & lists */
2039 	INIT_LIST_HEAD(&cdev->ll2->list);
2040 	spin_lock_init(&cdev->ll2->lock);
2041 	cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
2042 			     L1_CACHE_BYTES + params->mtu;
2043 	cdev->ll2->frags_mapped = params->frags_mapped;
2044 
2045 	/*Allocate memory for LL2 */
2046 	DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
2047 		cdev->ll2->rx_size);
2048 	for (i = 0; i < QED_LL2_RX_SIZE; i++) {
2049 		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
2050 		if (!buffer) {
2051 			DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
2052 			goto fail;
2053 		}
2054 
2055 		rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
2056 					  &buffer->phys_addr);
2057 		if (rc) {
2058 			kfree(buffer);
2059 			goto fail;
2060 		}
2061 
2062 		list_add_tail(&buffer->list, &cdev->ll2->list);
2063 	}
2064 
2065 	switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
2066 	case QED_PCI_FCOE:
2067 		conn_type = QED_LL2_TYPE_FCOE;
2068 		gsi_enable = 0;
2069 		break;
2070 	case QED_PCI_ISCSI:
2071 		conn_type = QED_LL2_TYPE_ISCSI;
2072 		gsi_enable = 0;
2073 		break;
2074 	case QED_PCI_ETH_ROCE:
2075 		conn_type = QED_LL2_TYPE_ROCE;
2076 		break;
2077 	default:
2078 		conn_type = QED_LL2_TYPE_TEST;
2079 	}
2080 
2081 	/* Prepare the temporary ll2 information */
2082 	memset(&ll2_info, 0, sizeof(ll2_info));
2083 
2084 	ll2_info.conn_type = conn_type;
2085 	ll2_info.mtu = params->mtu;
2086 	ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
2087 	ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
2088 	ll2_info.tx_tc = 0;
2089 	ll2_info.tx_dest = CORE_TX_DEST_NW;
2090 	ll2_info.gsi_enable = gsi_enable;
2091 
2092 	rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
2093 					QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
2094 					&cdev->ll2->handle);
2095 	if (rc) {
2096 		DP_INFO(cdev, "Failed to acquire LL2 connection\n");
2097 		goto fail;
2098 	}
2099 
2100 	rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
2101 					  cdev->ll2->handle);
2102 	if (rc) {
2103 		DP_INFO(cdev, "Failed to establish LL2 connection\n");
2104 		goto release_fail;
2105 	}
2106 
2107 	/* Post all Rx buffers to FW */
2108 	spin_lock_bh(&cdev->ll2->lock);
2109 	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
2110 		rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
2111 					    cdev->ll2->handle,
2112 					    buffer->phys_addr, 0, buffer, 1);
2113 		if (rc) {
2114 			DP_INFO(cdev,
2115 				"Failed to post an Rx buffer; Deleting it\n");
2116 			dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
2117 					 cdev->ll2->rx_size, DMA_FROM_DEVICE);
2118 			kfree(buffer->data);
2119 			list_del(&buffer->list);
2120 			kfree(buffer);
2121 		} else {
2122 			cdev->ll2->rx_cnt++;
2123 		}
2124 	}
2125 	spin_unlock_bh(&cdev->ll2->lock);
2126 
2127 	if (!cdev->ll2->rx_cnt) {
2128 		DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
2129 		goto release_terminate;
2130 	}
2131 
2132 	if (!is_valid_ether_addr(params->ll2_mac_address)) {
2133 		DP_INFO(cdev, "Invalid Ethernet address\n");
2134 		goto release_terminate;
2135 	}
2136 
2137 	if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
2138 	    cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
2139 		DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
2140 		rc = qed_ll2_start_ooo(cdev, params);
2141 		if (rc) {
2142 			DP_INFO(cdev,
2143 				"Failed to initialize the OOO LL2 queue\n");
2144 			goto release_terminate;
2145 		}
2146 	}
2147 
2148 	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
2149 	if (!p_ptt) {
2150 		DP_INFO(cdev, "Failed to acquire PTT\n");
2151 		goto release_terminate;
2152 	}
2153 
2154 	rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
2155 				    params->ll2_mac_address);
2156 	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
2157 	if (rc) {
2158 		DP_ERR(cdev, "Failed to allocate LLH filter\n");
2159 		goto release_terminate_all;
2160 	}
2161 
2162 	ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
2163 	return 0;
2164 
2165 release_terminate_all:
2166 
2167 release_terminate:
2168 	qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2169 release_fail:
2170 	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2171 fail:
2172 	qed_ll2_kill_buffers(cdev);
2173 	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
2174 	return -EINVAL;
2175 }
2176 
2177 static int qed_ll2_stop(struct qed_dev *cdev)
2178 {
2179 	struct qed_ptt *p_ptt;
2180 	int rc;
2181 
2182 	if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
2183 		return 0;
2184 
2185 	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
2186 	if (!p_ptt) {
2187 		DP_INFO(cdev, "Failed to acquire PTT\n");
2188 		goto fail;
2189 	}
2190 
2191 	qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
2192 				  cdev->ll2_mac_address);
2193 	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
2194 	eth_zero_addr(cdev->ll2_mac_address);
2195 
2196 	if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
2197 	    cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
2198 		qed_ll2_stop_ooo(cdev);
2199 
2200 	rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
2201 					  cdev->ll2->handle);
2202 	if (rc)
2203 		DP_INFO(cdev, "Failed to terminate LL2 connection\n");
2204 
2205 	qed_ll2_kill_buffers(cdev);
2206 
2207 	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2208 	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
2209 
2210 	return rc;
2211 fail:
2212 	return -EINVAL;
2213 }
2214 
2215 static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
2216 {
2217 	const skb_frag_t *frag;
2218 	int rc = -EINVAL, i;
2219 	dma_addr_t mapping;
2220 	u16 vlan = 0;
2221 	u8 flags = 0;
2222 
2223 	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
2224 		DP_INFO(cdev, "Cannot transmit a checksumed packet\n");
2225 		return -EINVAL;
2226 	}
2227 
2228 	if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
2229 		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
2230 		       1 + skb_shinfo(skb)->nr_frags);
2231 		return -EINVAL;
2232 	}
2233 
2234 	mapping = dma_map_single(&cdev->pdev->dev, skb->data,
2235 				 skb->len, DMA_TO_DEVICE);
2236 	if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
2237 		DP_NOTICE(cdev, "SKB mapping failed\n");
2238 		return -EINVAL;
2239 	}
2240 
2241 	/* Request HW to calculate IP csum */
2242 	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
2243 	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2244 		flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
2245 
2246 	if (skb_vlan_tag_present(skb)) {
2247 		vlan = skb_vlan_tag_get(skb);
2248 		flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
2249 	}
2250 
2251 	rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
2252 				       cdev->ll2->handle,
2253 				       1 + skb_shinfo(skb)->nr_frags,
2254 				       vlan, flags, 0, QED_LL2_TX_DEST_NW,
2255 				       0 /* RoCE FLAVOR */,
2256 				       mapping, skb->len, skb, 1);
2257 	if (rc)
2258 		goto err;
2259 
2260 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2261 		frag = &skb_shinfo(skb)->frags[i];
2262 		if (!cdev->ll2->frags_mapped) {
2263 			mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
2264 						   skb_frag_size(frag),
2265 						   DMA_TO_DEVICE);
2266 
2267 			if (unlikely(dma_mapping_error(&cdev->pdev->dev,
2268 						       mapping))) {
2269 				DP_NOTICE(cdev,
2270 					  "Unable to map frag - dropping packet\n");
2271 				rc = -ENOMEM;
2272 				goto err;
2273 			}
2274 		} else {
2275 			mapping = page_to_phys(skb_frag_page(frag)) |
2276 			    frag->page_offset;
2277 		}
2278 
2279 		rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
2280 						       cdev->ll2->handle,
2281 						       mapping,
2282 						       skb_frag_size(frag));
2283 
2284 		/* if failed not much to do here, partial packet has been posted
2285 		 * we can't free memory, will need to wait for completion.
2286 		 */
2287 		if (rc)
2288 			goto err2;
2289 	}
2290 
2291 	return 0;
2292 
2293 err:
2294 	dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
2295 
2296 err2:
2297 	return rc;
2298 }
2299 
2300 static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
2301 {
2302 	if (!cdev->ll2)
2303 		return -EINVAL;
2304 
2305 	return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
2306 				 cdev->ll2->handle, stats);
2307 }
2308 
2309 const struct qed_ll2_ops qed_ll2_ops_pass = {
2310 	.start = &qed_ll2_start,
2311 	.stop = &qed_ll2_stop,
2312 	.start_xmit = &qed_ll2_start_xmit,
2313 	.register_cb_ops = &qed_ll2_register_cb_ops,
2314 	.get_stats = &qed_ll2_stats,
2315 };
2316 
2317 int qed_ll2_alloc_if(struct qed_dev *cdev)
2318 {
2319 	cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
2320 	return cdev->ll2 ? 0 : -ENOMEM;
2321 }
2322 
2323 void qed_ll2_dealloc_if(struct qed_dev *cdev)
2324 {
2325 	kfree(cdev->ll2);
2326 	cdev->ll2 = NULL;
2327 }
2328