10a7fb11cSYuval Mintz /* QLogic qed NIC Driver
20a7fb11cSYuval Mintz  *
30a7fb11cSYuval Mintz  * Copyright (c) 2015 QLogic Corporation
40a7fb11cSYuval Mintz  *
50a7fb11cSYuval Mintz  * This software is available under the terms of the GNU General Public License
60a7fb11cSYuval Mintz  * (GPL) Version 2, available from the file COPYING in the main directory of
70a7fb11cSYuval Mintz  * this source tree.
80a7fb11cSYuval Mintz  */
90a7fb11cSYuval Mintz 
100a7fb11cSYuval Mintz #include <linux/types.h>
110a7fb11cSYuval Mintz #include <asm/byteorder.h>
120a7fb11cSYuval Mintz #include <linux/dma-mapping.h>
130a7fb11cSYuval Mintz #include <linux/if_vlan.h>
140a7fb11cSYuval Mintz #include <linux/kernel.h>
150a7fb11cSYuval Mintz #include <linux/pci.h>
160a7fb11cSYuval Mintz #include <linux/slab.h>
170a7fb11cSYuval Mintz #include <linux/stddef.h>
180a7fb11cSYuval Mintz #include <linux/version.h>
190a7fb11cSYuval Mintz #include <linux/workqueue.h>
200a7fb11cSYuval Mintz #include <net/ipv6.h>
210a7fb11cSYuval Mintz #include <linux/bitops.h>
220a7fb11cSYuval Mintz #include <linux/delay.h>
230a7fb11cSYuval Mintz #include <linux/errno.h>
240a7fb11cSYuval Mintz #include <linux/etherdevice.h>
250a7fb11cSYuval Mintz #include <linux/io.h>
260a7fb11cSYuval Mintz #include <linux/list.h>
270a7fb11cSYuval Mintz #include <linux/mutex.h>
280a7fb11cSYuval Mintz #include <linux/spinlock.h>
290a7fb11cSYuval Mintz #include <linux/string.h>
300a7fb11cSYuval Mintz #include <linux/qed/qed_ll2_if.h>
310a7fb11cSYuval Mintz #include "qed.h"
320a7fb11cSYuval Mintz #include "qed_cxt.h"
330a7fb11cSYuval Mintz #include "qed_dev_api.h"
340a7fb11cSYuval Mintz #include "qed_hsi.h"
350a7fb11cSYuval Mintz #include "qed_hw.h"
360a7fb11cSYuval Mintz #include "qed_int.h"
370a7fb11cSYuval Mintz #include "qed_ll2.h"
380a7fb11cSYuval Mintz #include "qed_mcp.h"
390a7fb11cSYuval Mintz #include "qed_reg_addr.h"
400a7fb11cSYuval Mintz #include "qed_sp.h"
410189efb8SYuval Mintz #include "qed_roce.h"
420a7fb11cSYuval Mintz 
/* True iff the queue's completion callback is registered with the hwfn
 * (the "registred" spelling matches the underlying struct field).
 */
#define QED_LL2_RX_REGISTERED(ll2)	((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2)	((ll2)->tx_queue.b_cb_registred)

/* Default sizing for the protocol-driver facing LL2 interface -
 * presumably Tx descriptor count and Rx buffer byte size; not used within
 * this chunk - TODO confirm at the usage sites.
 */
#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)
480a7fb11cSYuval Mintz 
/* Per-device state for the LL2 interface exposed to protocol drivers. */
struct qed_cb_ll2_info {
	int rx_cnt;		/* Number of Rx buffers currently posted */
	u32 rx_size;		/* Byte size of each Rx buffer allocation */
	u8 handle;		/* Connection handle for the LL2 core API */
	bool frags_mapped;	/* Tx skb frags were mapped by the caller */

	/* Lock protecting LL2 buffer lists in sleepless context */
	spinlock_t lock;
	struct list_head list;	/* List of struct qed_ll2_buffer entries */

	const struct qed_ll2_cb_ops *cbs;	/* Upper-layer callbacks */
	void *cb_cookie;	/* Opaque cookie passed back through cbs */
};
620a7fb11cSYuval Mintz 
/* One LL2 Rx buffer: raw data plus its streaming DMA mapping. */
struct qed_ll2_buffer {
	struct list_head list;	/* Link in qed_cb_ll2_info::list */
	void *data;		/* kmalloc'ed buffer of rx_size bytes */
	dma_addr_t phys_addr;	/* DMA address of data + NET_SKB_PAD */
};
680a7fb11cSYuval Mintz 
690a7fb11cSYuval Mintz static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
700a7fb11cSYuval Mintz 					u8 connection_handle,
710a7fb11cSYuval Mintz 					void *cookie,
720a7fb11cSYuval Mintz 					dma_addr_t first_frag_addr,
730a7fb11cSYuval Mintz 					bool b_last_fragment,
740a7fb11cSYuval Mintz 					bool b_last_packet)
750a7fb11cSYuval Mintz {
760a7fb11cSYuval Mintz 	struct qed_dev *cdev = p_hwfn->cdev;
770a7fb11cSYuval Mintz 	struct sk_buff *skb = cookie;
780a7fb11cSYuval Mintz 
790a7fb11cSYuval Mintz 	/* All we need to do is release the mapping */
800a7fb11cSYuval Mintz 	dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
810a7fb11cSYuval Mintz 			 skb_headlen(skb), DMA_TO_DEVICE);
820a7fb11cSYuval Mintz 
830a7fb11cSYuval Mintz 	if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
840a7fb11cSYuval Mintz 		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
850a7fb11cSYuval Mintz 				      b_last_fragment);
860a7fb11cSYuval Mintz 
870a7fb11cSYuval Mintz 	if (cdev->ll2->frags_mapped)
880a7fb11cSYuval Mintz 		/* Case where mapped frags were received, need to
890a7fb11cSYuval Mintz 		 * free skb with nr_frags marked as 0
900a7fb11cSYuval Mintz 		 */
910a7fb11cSYuval Mintz 		skb_shinfo(skb)->nr_frags = 0;
920a7fb11cSYuval Mintz 
930a7fb11cSYuval Mintz 	dev_kfree_skb_any(skb);
940a7fb11cSYuval Mintz }
950a7fb11cSYuval Mintz 
960a7fb11cSYuval Mintz static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
970a7fb11cSYuval Mintz 				u8 **data, dma_addr_t *phys_addr)
980a7fb11cSYuval Mintz {
990a7fb11cSYuval Mintz 	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
1000a7fb11cSYuval Mintz 	if (!(*data)) {
1010a7fb11cSYuval Mintz 		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
1020a7fb11cSYuval Mintz 		return -ENOMEM;
1030a7fb11cSYuval Mintz 	}
1040a7fb11cSYuval Mintz 
1050a7fb11cSYuval Mintz 	*phys_addr = dma_map_single(&cdev->pdev->dev,
1060a7fb11cSYuval Mintz 				    ((*data) + NET_SKB_PAD),
1070a7fb11cSYuval Mintz 				    cdev->ll2->rx_size, DMA_FROM_DEVICE);
1080a7fb11cSYuval Mintz 	if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
1090a7fb11cSYuval Mintz 		DP_INFO(cdev, "Failed to map LL2 buffer data\n");
1100a7fb11cSYuval Mintz 		kfree((*data));
1110a7fb11cSYuval Mintz 		return -ENOMEM;
1120a7fb11cSYuval Mintz 	}
1130a7fb11cSYuval Mintz 
1140a7fb11cSYuval Mintz 	return 0;
1150a7fb11cSYuval Mintz }
1160a7fb11cSYuval Mintz 
1170a7fb11cSYuval Mintz static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
1180a7fb11cSYuval Mintz 				 struct qed_ll2_buffer *buffer)
1190a7fb11cSYuval Mintz {
1200a7fb11cSYuval Mintz 	spin_lock_bh(&cdev->ll2->lock);
1210a7fb11cSYuval Mintz 
1220a7fb11cSYuval Mintz 	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
1230a7fb11cSYuval Mintz 			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
1240a7fb11cSYuval Mintz 	kfree(buffer->data);
1250a7fb11cSYuval Mintz 	list_del(&buffer->list);
1260a7fb11cSYuval Mintz 
1270a7fb11cSYuval Mintz 	cdev->ll2->rx_cnt--;
1280a7fb11cSYuval Mintz 	if (!cdev->ll2->rx_cnt)
1290a7fb11cSYuval Mintz 		DP_INFO(cdev, "All LL2 entries were removed\n");
1300a7fb11cSYuval Mintz 
1310a7fb11cSYuval Mintz 	spin_unlock_bh(&cdev->ll2->lock);
1320a7fb11cSYuval Mintz 
1330a7fb11cSYuval Mintz 	return 0;
1340a7fb11cSYuval Mintz }
1350a7fb11cSYuval Mintz 
1360a7fb11cSYuval Mintz static void qed_ll2_kill_buffers(struct qed_dev *cdev)
1370a7fb11cSYuval Mintz {
1380a7fb11cSYuval Mintz 	struct qed_ll2_buffer *buffer, *tmp_buffer;
1390a7fb11cSYuval Mintz 
1400a7fb11cSYuval Mintz 	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
1410a7fb11cSYuval Mintz 		qed_ll2_dealloc_buffer(cdev, buffer);
1420a7fb11cSYuval Mintz }
1430a7fb11cSYuval Mintz 
1448c93beafSYuval Mintz static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
1450a7fb11cSYuval Mintz 					u8 connection_handle,
1460a7fb11cSYuval Mintz 					struct qed_ll2_rx_packet *p_pkt,
1470a7fb11cSYuval Mintz 					struct core_rx_fast_path_cqe *p_cqe,
1480a7fb11cSYuval Mintz 					bool b_last_packet)
1490a7fb11cSYuval Mintz {
1500a7fb11cSYuval Mintz 	u16 packet_length = le16_to_cpu(p_cqe->packet_length);
1510a7fb11cSYuval Mintz 	struct qed_ll2_buffer *buffer = p_pkt->cookie;
1520a7fb11cSYuval Mintz 	struct qed_dev *cdev = p_hwfn->cdev;
1530a7fb11cSYuval Mintz 	u16 vlan = le16_to_cpu(p_cqe->vlan);
1540a7fb11cSYuval Mintz 	u32 opaque_data_0, opaque_data_1;
1550a7fb11cSYuval Mintz 	u8 pad = p_cqe->placement_offset;
1560a7fb11cSYuval Mintz 	dma_addr_t new_phys_addr;
1570a7fb11cSYuval Mintz 	struct sk_buff *skb;
1580a7fb11cSYuval Mintz 	bool reuse = false;
1590a7fb11cSYuval Mintz 	int rc = -EINVAL;
1600a7fb11cSYuval Mintz 	u8 *new_data;
1610a7fb11cSYuval Mintz 
1620a7fb11cSYuval Mintz 	opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]);
1630a7fb11cSYuval Mintz 	opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]);
1640a7fb11cSYuval Mintz 
1650a7fb11cSYuval Mintz 	DP_VERBOSE(p_hwfn,
1660a7fb11cSYuval Mintz 		   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
1670a7fb11cSYuval Mintz 		   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
1680a7fb11cSYuval Mintz 		   (u64)p_pkt->rx_buf_addr, pad, packet_length,
1690a7fb11cSYuval Mintz 		   le16_to_cpu(p_cqe->parse_flags.flags), vlan,
1700a7fb11cSYuval Mintz 		   opaque_data_0, opaque_data_1);
1710a7fb11cSYuval Mintz 
1720a7fb11cSYuval Mintz 	if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
1730a7fb11cSYuval Mintz 		print_hex_dump(KERN_INFO, "",
1740a7fb11cSYuval Mintz 			       DUMP_PREFIX_OFFSET, 16, 1,
1750a7fb11cSYuval Mintz 			       buffer->data, packet_length, false);
1760a7fb11cSYuval Mintz 	}
1770a7fb11cSYuval Mintz 
1780a7fb11cSYuval Mintz 	/* Determine if data is valid */
1790a7fb11cSYuval Mintz 	if (packet_length < ETH_HLEN)
1800a7fb11cSYuval Mintz 		reuse = true;
1810a7fb11cSYuval Mintz 
1820a7fb11cSYuval Mintz 	/* Allocate a replacement for buffer; Reuse upon failure */
1830a7fb11cSYuval Mintz 	if (!reuse)
1840a7fb11cSYuval Mintz 		rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
1850a7fb11cSYuval Mintz 					  &new_phys_addr);
1860a7fb11cSYuval Mintz 
1870a7fb11cSYuval Mintz 	/* If need to reuse or there's no replacement buffer, repost this */
1880a7fb11cSYuval Mintz 	if (rc)
1890a7fb11cSYuval Mintz 		goto out_post;
1900a7fb11cSYuval Mintz 
1910a7fb11cSYuval Mintz 	skb = build_skb(buffer->data, 0);
1920a7fb11cSYuval Mintz 	if (!skb) {
1930a7fb11cSYuval Mintz 		rc = -ENOMEM;
1940a7fb11cSYuval Mintz 		goto out_post;
1950a7fb11cSYuval Mintz 	}
1960a7fb11cSYuval Mintz 
1970a7fb11cSYuval Mintz 	pad += NET_SKB_PAD;
1980a7fb11cSYuval Mintz 	skb_reserve(skb, pad);
1990a7fb11cSYuval Mintz 	skb_put(skb, packet_length);
2000a7fb11cSYuval Mintz 	skb_checksum_none_assert(skb);
2010a7fb11cSYuval Mintz 
2020a7fb11cSYuval Mintz 	/* Get parital ethernet information instead of eth_type_trans(),
2030a7fb11cSYuval Mintz 	 * Since we don't have an associated net_device.
2040a7fb11cSYuval Mintz 	 */
2050a7fb11cSYuval Mintz 	skb_reset_mac_header(skb);
2060a7fb11cSYuval Mintz 	skb->protocol = eth_hdr(skb)->h_proto;
2070a7fb11cSYuval Mintz 
2080a7fb11cSYuval Mintz 	/* Pass SKB onward */
2090a7fb11cSYuval Mintz 	if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
2100a7fb11cSYuval Mintz 		if (vlan)
2110a7fb11cSYuval Mintz 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
2120a7fb11cSYuval Mintz 		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
2130a7fb11cSYuval Mintz 				      opaque_data_0, opaque_data_1);
2140a7fb11cSYuval Mintz 	}
2150a7fb11cSYuval Mintz 
2160a7fb11cSYuval Mintz 	/* Update Buffer information and update FW producer */
2170a7fb11cSYuval Mintz 	buffer->data = new_data;
2180a7fb11cSYuval Mintz 	buffer->phys_addr = new_phys_addr;
2190a7fb11cSYuval Mintz 
2200a7fb11cSYuval Mintz out_post:
2210a7fb11cSYuval Mintz 	rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
2220a7fb11cSYuval Mintz 				    buffer->phys_addr, 0,  buffer, 1);
2230a7fb11cSYuval Mintz 
2240a7fb11cSYuval Mintz 	if (rc)
2250a7fb11cSYuval Mintz 		qed_ll2_dealloc_buffer(cdev, buffer);
2260a7fb11cSYuval Mintz }
2270a7fb11cSYuval Mintz 
2280a7fb11cSYuval Mintz static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
2290a7fb11cSYuval Mintz 						    u8 connection_handle,
2300a7fb11cSYuval Mintz 						    bool b_lock,
2310a7fb11cSYuval Mintz 						    bool b_only_active)
2320a7fb11cSYuval Mintz {
2330a7fb11cSYuval Mintz 	struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;
2340a7fb11cSYuval Mintz 
2350a7fb11cSYuval Mintz 	if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
2360a7fb11cSYuval Mintz 		return NULL;
2370a7fb11cSYuval Mintz 
2380a7fb11cSYuval Mintz 	if (!p_hwfn->p_ll2_info)
2390a7fb11cSYuval Mintz 		return NULL;
2400a7fb11cSYuval Mintz 
2410a7fb11cSYuval Mintz 	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
2420a7fb11cSYuval Mintz 
2430a7fb11cSYuval Mintz 	if (b_only_active) {
2440a7fb11cSYuval Mintz 		if (b_lock)
2450a7fb11cSYuval Mintz 			mutex_lock(&p_ll2_conn->mutex);
2460a7fb11cSYuval Mintz 		if (p_ll2_conn->b_active)
2470a7fb11cSYuval Mintz 			p_ret = p_ll2_conn;
2480a7fb11cSYuval Mintz 		if (b_lock)
2490a7fb11cSYuval Mintz 			mutex_unlock(&p_ll2_conn->mutex);
2500a7fb11cSYuval Mintz 	} else {
2510a7fb11cSYuval Mintz 		p_ret = p_ll2_conn;
2520a7fb11cSYuval Mintz 	}
2530a7fb11cSYuval Mintz 
2540a7fb11cSYuval Mintz 	return p_ret;
2550a7fb11cSYuval Mintz }
2560a7fb11cSYuval Mintz 
2570a7fb11cSYuval Mintz static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
2580a7fb11cSYuval Mintz 						  u8 connection_handle)
2590a7fb11cSYuval Mintz {
2600a7fb11cSYuval Mintz 	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
2610a7fb11cSYuval Mintz }
2620a7fb11cSYuval Mintz 
2630a7fb11cSYuval Mintz static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
2640a7fb11cSYuval Mintz 						       u8 connection_handle)
2650a7fb11cSYuval Mintz {
2660a7fb11cSYuval Mintz 	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
2670a7fb11cSYuval Mintz }
2680a7fb11cSYuval Mintz 
2690a7fb11cSYuval Mintz static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
2700a7fb11cSYuval Mintz 							   *p_hwfn,
2710a7fb11cSYuval Mintz 							   u8 connection_handle)
2720a7fb11cSYuval Mintz {
2730a7fb11cSYuval Mintz 	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
2740a7fb11cSYuval Mintz }
2750a7fb11cSYuval Mintz 
/* Drain every Tx descriptor still on the connection's active list,
 * completing each packet toward its owner (GSI release or the regular
 * Tx completion) as if it had been transmitted. Used when tearing down
 * a connection with pending Tx work; hence the "inactive" lookup.
 */
static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	bool b_last_packet = false, b_last_frag = false;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_tx_queue *p_tx;
	dma_addr_t tx_frag;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_tx = &p_ll2_conn->tx_queue;

	while (!list_empty(&p_tx->active_descq)) {
		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		/* Move the descriptor to the free list before the callback,
		 * mirroring the regular completion flow.
		 */
		list_del(&p_pkt->list_entry);
		b_last_packet = list_empty(&p_tx->active_descq);
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
		/* Keep cur_completing_* consistent for anyone inspecting it */
		p_tx->cur_completing_packet = *p_pkt;
		p_tx->cur_completing_bd_idx = 1;
		/* NOTE(review): true only for single-BD packets
		 * (bd_used == 1) - confirm this is the intended semantic.
		 */
		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
		tx_frag = p_pkt->bds_set[0].tx_frag;
		if (p_ll2_conn->gsi_enable)
			qed_ll2b_release_tx_gsi_packet(p_hwfn,
						       p_ll2_conn->my_id,
						       p_pkt->cookie,
						       tx_frag,
						       b_last_frag,
						       b_last_packet);
		else
			qed_ll2b_complete_tx_packet(p_hwfn,
						    p_ll2_conn->my_id,
						    p_pkt->cookie,
						    tx_frag,
						    b_last_frag,
						    b_last_packet);

	}
}
3200a7fb11cSYuval Mintz 
/* Tx completion handler for an LL2 connection.
 *
 * Walks forward from the driver's BD index to the firmware consumer index,
 * completing every fully-consumed packet toward its owner (GSI or regular
 * callback). The queue lock is dropped around each callback and re-taken
 * afterwards.
 *
 * Returns 0 on success, -EBUSY if a completion pass is already running,
 * -EINVAL on inconsistent queue state.
 */
static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
	struct qed_ll2_tx_packet *p_pkt;
	bool b_last_frag = false;
	unsigned long flags;
	dma_addr_t tx_frag;
	int rc = -EINVAL;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->b_completing_packet) {
		rc = -EBUSY;
		goto out;
	}

	/* Number of BDs the firmware consumed since the last pass */
	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			goto out;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			goto out;

		/* Copy the packet aside; its descriptor is recycled below */
		p_tx->b_completing_packet = true;
		p_tx->cur_completing_packet = *p_pkt;
		num_bds_in_packet = p_pkt->bd_used;
		list_del(&p_pkt->list_entry);

		/* A packet must be completed whole or not at all */
		if (num_bds < num_bds_in_packet) {
			DP_NOTICE(p_hwfn,
				  "Rest of BDs does not cover whole packet\n");
			goto out;
		}

		num_bds -= num_bds_in_packet;
		p_tx->bds_idx += num_bds_in_packet;
		while (num_bds_in_packet--)
			qed_chain_consume(&p_tx->txq_chain);

		p_tx->cur_completing_bd_idx = 1;
		/* NOTE(review): true only for single-BD packets - confirm */
		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		/* Drop the queue lock while invoking the completion */
		spin_unlock_irqrestore(&p_tx->lock, flags);
		tx_frag = p_pkt->bds_set[0].tx_frag;
		if (p_ll2_conn->gsi_enable)
			qed_ll2b_complete_tx_gsi_packet(p_hwfn,
							p_ll2_conn->my_id,
							p_pkt->cookie,
							tx_frag,
							b_last_frag, !num_bds);
		else
			qed_ll2b_complete_tx_packet(p_hwfn,
						    p_ll2_conn->my_id,
						    p_pkt->cookie,
						    tx_frag,
						    b_last_frag, !num_bds);
		spin_lock_irqsave(&p_tx->lock, flags);
	}

	p_tx->b_completing_packet = false;
	rc = 0;
out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}
3920a7fb11cSYuval Mintz 
/* Complete a single GSI (RoCE) Rx CQE.
 *
 * Called with p_rx->lock held; the lock is dropped while the upper-layer
 * completion runs and re-taken before returning, so the caller's locking
 * state is preserved.
 *
 * Returns 0 on success, -EIO if no Rx descriptor is pending.
 */
static int
qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
			   struct qed_ll2_info *p_ll2_info,
			   union core_rx_cqe_union *p_cqe,
			   unsigned long lock_flags, bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u16 packet_length, parse_flags, vlan;
	u32 src_mac_addrhi;
	u16 src_mac_addrlo;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "GSI Rx completion but active_descq is empty\n");
		return -EIO;
	}

	list_del(&p_pkt->list_entry);
	/* Sample all CQE fields while still holding the lock */
	parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
	packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
	vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
	src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
	src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
	/* The Rx chain must advance in lock-step with the descriptor list */
	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");
	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	/* Release the lock around the upper-layer callback */
	spin_unlock_irqrestore(&p_rx->lock, lock_flags);
	qed_ll2b_complete_rx_gsi_packet(p_hwfn,
					p_ll2_info->my_id,
					p_pkt->cookie,
					p_pkt->rx_buf_addr,
					packet_length,
					p_cqe->rx_cqe_gsi.data_length_error,
					parse_flags,
					vlan,
					src_mac_addrhi,
					src_mac_addrlo, b_last_cqe);
	spin_lock_irqsave(&p_rx->lock, lock_flags);

	return 0;
}
440abd49676SRam Amrani 
/* Complete a single regular (fast-path) Rx CQE.
 *
 * Called with p_rx->lock held; the lock is dropped while the buffer is
 * handed to qed_ll2b_complete_rx_packet() and re-taken before returning.
 *
 * Returns 0 on success, -EIO if no Rx descriptor is pending.
 */
static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
				      struct qed_ll2_info *p_ll2_conn,
				      union core_rx_cqe_union *p_cqe,
				      unsigned long lock_flags,
				      bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "LL2 Rx completion but active_descq is empty\n");
		return -EIO;
	}
	list_del(&p_pkt->list_entry);

	/* The Rx chain must advance in lock-step with the descriptor list */
	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");
	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	/* Release the lock around the upper-layer completion */
	spin_unlock_irqrestore(&p_rx->lock, lock_flags);
	qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
				    p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
	spin_lock_irqsave(&p_rx->lock, lock_flags);

	return 0;
}
4720a7fb11cSYuval Mintz 
/* Rx completion handler for an LL2 connection.
 *
 * Consumes RCQ entries up to the firmware consumer index and dispatches
 * each CQE by type (GSI offload vs. regular). Slow-path CQEs are not
 * expected here. Runs with p_rx->lock held; the per-CQE handlers drop it
 * around upper-layer callbacks.
 *
 * Returns 0 on success or a negative errno from the last failing CQE.
 */
static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_ll2_info *p_ll2_conn = cookie;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	unsigned long flags = 0;
	int rc = 0;

	spin_lock_irqsave(&p_rx->lock, flags);
	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

	while (cq_new_idx != cq_old_idx) {
		/* NOTE(review): evaluated while the loop condition holds,
		 * so b_last_cqe is always false here. If the intent was to
		 * flag the final CQE, it would have to be computed after
		 * cq_old_idx is refreshed below - confirm before changing.
		 */
		bool b_last_cqe = (cq_new_idx == cq_old_idx);

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

		DP_VERBOSE(p_hwfn,
			   QED_MSG_LL2,
			   "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
			   cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);

		switch (cqe->rx_cqe_sp.type) {
		case CORE_RX_CQE_TYPE_SLOW_PATH:
			DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
			rc = -EINVAL;
			break;
		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
			rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
							cqe, flags, b_last_cqe);
			break;
		case CORE_RX_CQE_TYPE_REGULAR:
			rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
							cqe, flags, b_last_cqe);
			break;
		default:
			rc = -EIO;
		}
	}

	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}
5180a7fb11cSYuval Mintz 
5198c93beafSYuval Mintz static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
5200a7fb11cSYuval Mintz {
5210a7fb11cSYuval Mintz 	struct qed_ll2_info *p_ll2_conn = NULL;
5220a7fb11cSYuval Mintz 	struct qed_ll2_rx_packet *p_pkt = NULL;
5230a7fb11cSYuval Mintz 	struct qed_ll2_rx_queue *p_rx;
5240a7fb11cSYuval Mintz 
5250a7fb11cSYuval Mintz 	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
5260a7fb11cSYuval Mintz 	if (!p_ll2_conn)
5270a7fb11cSYuval Mintz 		return;
5280a7fb11cSYuval Mintz 
5290a7fb11cSYuval Mintz 	p_rx = &p_ll2_conn->rx_queue;
5300a7fb11cSYuval Mintz 
5310a7fb11cSYuval Mintz 	while (!list_empty(&p_rx->active_descq)) {
5320a7fb11cSYuval Mintz 		dma_addr_t rx_buf_addr;
5330a7fb11cSYuval Mintz 		void *cookie;
5340a7fb11cSYuval Mintz 		bool b_last;
5350a7fb11cSYuval Mintz 
5360a7fb11cSYuval Mintz 		p_pkt = list_first_entry(&p_rx->active_descq,
5370a7fb11cSYuval Mintz 					 struct qed_ll2_rx_packet, list_entry);
5380a7fb11cSYuval Mintz 		if (!p_pkt)
5390a7fb11cSYuval Mintz 			break;
5400a7fb11cSYuval Mintz 
541b4f0fd4bSWei Yongjun 		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
5420a7fb11cSYuval Mintz 
5430a7fb11cSYuval Mintz 		rx_buf_addr = p_pkt->rx_buf_addr;
5440a7fb11cSYuval Mintz 		cookie = p_pkt->cookie;
5450a7fb11cSYuval Mintz 
5460a7fb11cSYuval Mintz 		b_last = list_empty(&p_rx->active_descq);
5470a7fb11cSYuval Mintz 	}
5480a7fb11cSYuval Mintz }
5490a7fb11cSYuval Mintz 
/* Send the CORE_RAMROD_RX_QUEUE_START ramrod for an LL2 connection,
 * describing the Rx BD chain and RCQ PBL to the firmware.
 *
 * @action_on_error: firmware policy for Rx errors on this queue.
 *
 * Returns 0 on success or the errno from SPQ request/post.
 */
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn,
				     u8 action_on_error)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 cqe_pbl_size;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_start;

	/* Completions are reported on the slowpath status block */
	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_rx->rx_sb_index;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
	DMA_REGPAIR_LE(p_ramrod->bd_base,
		       p_rx->rxq_chain.p_phys_addr);
	cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));

	p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg;
	p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en;
	p_ramrod->queue_id = p_ll2_conn->queue_id;
	p_ramrod->main_func_queue = 1;

	/* In default/NPAR-SI multi-function modes, non-RoCE main queues
	 * accept all broadcast/multicast traffic.
	 */
	if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
	    p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
		p_ramrod->mf_si_bcast_accept_all = 1;
		p_ramrod->mf_si_mcast_accept_all = 1;
	} else {
		p_ramrod->mf_si_bcast_accept_all = 0;
		p_ramrod->mf_si_mcast_accept_all = 0;
	}

	p_ramrod->action_on_error.error_type = action_on_error;
	p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
	return qed_spq_post(p_hwfn, p_ent, NULL);
}
6060a7fb11cSYuval Mintz 
6070a7fb11cSYuval Mintz static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
6080a7fb11cSYuval Mintz 				     struct qed_ll2_info *p_ll2_conn)
6090a7fb11cSYuval Mintz {
6100a7fb11cSYuval Mintz 	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
6110a7fb11cSYuval Mintz 	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
6120a7fb11cSYuval Mintz 	struct core_tx_start_ramrod_data *p_ramrod = NULL;
6130a7fb11cSYuval Mintz 	struct qed_spq_entry *p_ent = NULL;
6140a7fb11cSYuval Mintz 	struct qed_sp_init_data init_data;
6150a7fb11cSYuval Mintz 	union qed_qm_pq_params pq_params;
6160a7fb11cSYuval Mintz 	u16 pq_id = 0, pbl_size;
6170a7fb11cSYuval Mintz 	int rc = -EINVAL;
6180a7fb11cSYuval Mintz 
6190a7fb11cSYuval Mintz 	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
6200a7fb11cSYuval Mintz 		return 0;
6210a7fb11cSYuval Mintz 
6220a7fb11cSYuval Mintz 	/* Get SPQ entry */
6230a7fb11cSYuval Mintz 	memset(&init_data, 0, sizeof(init_data));
6240a7fb11cSYuval Mintz 	init_data.cid = p_ll2_conn->cid;
6250a7fb11cSYuval Mintz 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
6260a7fb11cSYuval Mintz 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
6270a7fb11cSYuval Mintz 
6280a7fb11cSYuval Mintz 	rc = qed_sp_init_request(p_hwfn, &p_ent,
6290a7fb11cSYuval Mintz 				 CORE_RAMROD_TX_QUEUE_START,
6300a7fb11cSYuval Mintz 				 PROTOCOLID_CORE, &init_data);
6310a7fb11cSYuval Mintz 	if (rc)
6320a7fb11cSYuval Mintz 		return rc;
6330a7fb11cSYuval Mintz 
6340a7fb11cSYuval Mintz 	p_ramrod = &p_ent->ramrod.core_tx_queue_start;
6350a7fb11cSYuval Mintz 
6360a7fb11cSYuval Mintz 	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
6370a7fb11cSYuval Mintz 	p_ramrod->sb_index = p_tx->tx_sb_index;
6380a7fb11cSYuval Mintz 	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
6390a7fb11cSYuval Mintz 	p_ll2_conn->tx_stats_en = 1;
6400a7fb11cSYuval Mintz 	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
6410a7fb11cSYuval Mintz 	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
6420a7fb11cSYuval Mintz 
6430a7fb11cSYuval Mintz 	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
6440a7fb11cSYuval Mintz 		       qed_chain_get_pbl_phys(&p_tx->txq_chain));
6450a7fb11cSYuval Mintz 	pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
6460a7fb11cSYuval Mintz 	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
6470a7fb11cSYuval Mintz 
6480a7fb11cSYuval Mintz 	memset(&pq_params, 0, sizeof(pq_params));
6490a7fb11cSYuval Mintz 	pq_params.core.tc = p_ll2_conn->tx_tc;
6500a7fb11cSYuval Mintz 	pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
6510a7fb11cSYuval Mintz 	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
6520a7fb11cSYuval Mintz 
6530a7fb11cSYuval Mintz 	switch (conn_type) {
6540a7fb11cSYuval Mintz 	case QED_LL2_TYPE_ISCSI:
6550a7fb11cSYuval Mintz 	case QED_LL2_TYPE_ISCSI_OOO:
6560a7fb11cSYuval Mintz 		p_ramrod->conn_type = PROTOCOLID_ISCSI;
6570a7fb11cSYuval Mintz 		break;
6580a7fb11cSYuval Mintz 	case QED_LL2_TYPE_ROCE:
6590a7fb11cSYuval Mintz 		p_ramrod->conn_type = PROTOCOLID_ROCE;
6600a7fb11cSYuval Mintz 		break;
6610a7fb11cSYuval Mintz 	default:
6620a7fb11cSYuval Mintz 		p_ramrod->conn_type = PROTOCOLID_ETH;
6630a7fb11cSYuval Mintz 		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
6640a7fb11cSYuval Mintz 	}
6650a7fb11cSYuval Mintz 
666abd49676SRam Amrani 	p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
6670a7fb11cSYuval Mintz 	return qed_spq_post(p_hwfn, p_ent, NULL);
6680a7fb11cSYuval Mintz }
6690a7fb11cSYuval Mintz 
6700a7fb11cSYuval Mintz static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
6710a7fb11cSYuval Mintz 				    struct qed_ll2_info *p_ll2_conn)
6720a7fb11cSYuval Mintz {
6730a7fb11cSYuval Mintz 	struct core_rx_stop_ramrod_data *p_ramrod = NULL;
6740a7fb11cSYuval Mintz 	struct qed_spq_entry *p_ent = NULL;
6750a7fb11cSYuval Mintz 	struct qed_sp_init_data init_data;
6760a7fb11cSYuval Mintz 	int rc = -EINVAL;
6770a7fb11cSYuval Mintz 
6780a7fb11cSYuval Mintz 	/* Get SPQ entry */
6790a7fb11cSYuval Mintz 	memset(&init_data, 0, sizeof(init_data));
6800a7fb11cSYuval Mintz 	init_data.cid = p_ll2_conn->cid;
6810a7fb11cSYuval Mintz 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
6820a7fb11cSYuval Mintz 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
6830a7fb11cSYuval Mintz 
6840a7fb11cSYuval Mintz 	rc = qed_sp_init_request(p_hwfn, &p_ent,
6850a7fb11cSYuval Mintz 				 CORE_RAMROD_RX_QUEUE_STOP,
6860a7fb11cSYuval Mintz 				 PROTOCOLID_CORE, &init_data);
6870a7fb11cSYuval Mintz 	if (rc)
6880a7fb11cSYuval Mintz 		return rc;
6890a7fb11cSYuval Mintz 
6900a7fb11cSYuval Mintz 	p_ramrod = &p_ent->ramrod.core_rx_queue_stop;
6910a7fb11cSYuval Mintz 
6920a7fb11cSYuval Mintz 	p_ramrod->complete_event_flg = 1;
6930a7fb11cSYuval Mintz 	p_ramrod->queue_id = p_ll2_conn->queue_id;
6940a7fb11cSYuval Mintz 
6950a7fb11cSYuval Mintz 	return qed_spq_post(p_hwfn, p_ent, NULL);
6960a7fb11cSYuval Mintz }
6970a7fb11cSYuval Mintz 
6980a7fb11cSYuval Mintz static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
6990a7fb11cSYuval Mintz 				    struct qed_ll2_info *p_ll2_conn)
7000a7fb11cSYuval Mintz {
7010a7fb11cSYuval Mintz 	struct qed_spq_entry *p_ent = NULL;
7020a7fb11cSYuval Mintz 	struct qed_sp_init_data init_data;
7030a7fb11cSYuval Mintz 	int rc = -EINVAL;
7040a7fb11cSYuval Mintz 
7050a7fb11cSYuval Mintz 	/* Get SPQ entry */
7060a7fb11cSYuval Mintz 	memset(&init_data, 0, sizeof(init_data));
7070a7fb11cSYuval Mintz 	init_data.cid = p_ll2_conn->cid;
7080a7fb11cSYuval Mintz 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
7090a7fb11cSYuval Mintz 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
7100a7fb11cSYuval Mintz 
7110a7fb11cSYuval Mintz 	rc = qed_sp_init_request(p_hwfn, &p_ent,
7120a7fb11cSYuval Mintz 				 CORE_RAMROD_TX_QUEUE_STOP,
7130a7fb11cSYuval Mintz 				 PROTOCOLID_CORE, &init_data);
7140a7fb11cSYuval Mintz 	if (rc)
7150a7fb11cSYuval Mintz 		return rc;
7160a7fb11cSYuval Mintz 
7170a7fb11cSYuval Mintz 	return qed_spq_post(p_hwfn, p_ent, NULL);
7180a7fb11cSYuval Mintz }
7190a7fb11cSYuval Mintz 
7200a7fb11cSYuval Mintz static int
7210a7fb11cSYuval Mintz qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
7220a7fb11cSYuval Mintz 			      struct qed_ll2_info *p_ll2_info, u16 rx_num_desc)
7230a7fb11cSYuval Mintz {
7240a7fb11cSYuval Mintz 	struct qed_ll2_rx_packet *p_descq;
7250a7fb11cSYuval Mintz 	u32 capacity;
7260a7fb11cSYuval Mintz 	int rc = 0;
7270a7fb11cSYuval Mintz 
7280a7fb11cSYuval Mintz 	if (!rx_num_desc)
7290a7fb11cSYuval Mintz 		goto out;
7300a7fb11cSYuval Mintz 
7310a7fb11cSYuval Mintz 	rc = qed_chain_alloc(p_hwfn->cdev,
7320a7fb11cSYuval Mintz 			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
7330a7fb11cSYuval Mintz 			     QED_CHAIN_MODE_NEXT_PTR,
7340a7fb11cSYuval Mintz 			     QED_CHAIN_CNT_TYPE_U16,
7350a7fb11cSYuval Mintz 			     rx_num_desc,
7360a7fb11cSYuval Mintz 			     sizeof(struct core_rx_bd),
7370a7fb11cSYuval Mintz 			     &p_ll2_info->rx_queue.rxq_chain);
7380a7fb11cSYuval Mintz 	if (rc) {
7390a7fb11cSYuval Mintz 		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
7400a7fb11cSYuval Mintz 		goto out;
7410a7fb11cSYuval Mintz 	}
7420a7fb11cSYuval Mintz 
7430a7fb11cSYuval Mintz 	capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
7440a7fb11cSYuval Mintz 	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
7450a7fb11cSYuval Mintz 			  GFP_KERNEL);
7460a7fb11cSYuval Mintz 	if (!p_descq) {
7470a7fb11cSYuval Mintz 		rc = -ENOMEM;
7480a7fb11cSYuval Mintz 		DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
7490a7fb11cSYuval Mintz 		goto out;
7500a7fb11cSYuval Mintz 	}
7510a7fb11cSYuval Mintz 	p_ll2_info->rx_queue.descq_array = p_descq;
7520a7fb11cSYuval Mintz 
7530a7fb11cSYuval Mintz 	rc = qed_chain_alloc(p_hwfn->cdev,
7540a7fb11cSYuval Mintz 			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
7550a7fb11cSYuval Mintz 			     QED_CHAIN_MODE_PBL,
7560a7fb11cSYuval Mintz 			     QED_CHAIN_CNT_TYPE_U16,
7570a7fb11cSYuval Mintz 			     rx_num_desc,
7580a7fb11cSYuval Mintz 			     sizeof(struct core_rx_fast_path_cqe),
7590a7fb11cSYuval Mintz 			     &p_ll2_info->rx_queue.rcq_chain);
7600a7fb11cSYuval Mintz 	if (rc) {
7610a7fb11cSYuval Mintz 		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
7620a7fb11cSYuval Mintz 		goto out;
7630a7fb11cSYuval Mintz 	}
7640a7fb11cSYuval Mintz 
7650a7fb11cSYuval Mintz 	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
7660a7fb11cSYuval Mintz 		   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
7670a7fb11cSYuval Mintz 		   p_ll2_info->conn_type, rx_num_desc);
7680a7fb11cSYuval Mintz 
7690a7fb11cSYuval Mintz out:
7700a7fb11cSYuval Mintz 	return rc;
7710a7fb11cSYuval Mintz }
7720a7fb11cSYuval Mintz 
7730a7fb11cSYuval Mintz static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
7740a7fb11cSYuval Mintz 					 struct qed_ll2_info *p_ll2_info,
7750a7fb11cSYuval Mintz 					 u16 tx_num_desc)
7760a7fb11cSYuval Mintz {
7770a7fb11cSYuval Mintz 	struct qed_ll2_tx_packet *p_descq;
7780a7fb11cSYuval Mintz 	u32 capacity;
7790a7fb11cSYuval Mintz 	int rc = 0;
7800a7fb11cSYuval Mintz 
7810a7fb11cSYuval Mintz 	if (!tx_num_desc)
7820a7fb11cSYuval Mintz 		goto out;
7830a7fb11cSYuval Mintz 
7840a7fb11cSYuval Mintz 	rc = qed_chain_alloc(p_hwfn->cdev,
7850a7fb11cSYuval Mintz 			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
7860a7fb11cSYuval Mintz 			     QED_CHAIN_MODE_PBL,
7870a7fb11cSYuval Mintz 			     QED_CHAIN_CNT_TYPE_U16,
7880a7fb11cSYuval Mintz 			     tx_num_desc,
7890a7fb11cSYuval Mintz 			     sizeof(struct core_tx_bd),
7900a7fb11cSYuval Mintz 			     &p_ll2_info->tx_queue.txq_chain);
7910a7fb11cSYuval Mintz 	if (rc)
7920a7fb11cSYuval Mintz 		goto out;
7930a7fb11cSYuval Mintz 
7940a7fb11cSYuval Mintz 	capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
7950a7fb11cSYuval Mintz 	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
7960a7fb11cSYuval Mintz 			  GFP_KERNEL);
7970a7fb11cSYuval Mintz 	if (!p_descq) {
7980a7fb11cSYuval Mintz 		rc = -ENOMEM;
7990a7fb11cSYuval Mintz 		goto out;
8000a7fb11cSYuval Mintz 	}
8010a7fb11cSYuval Mintz 	p_ll2_info->tx_queue.descq_array = p_descq;
8020a7fb11cSYuval Mintz 
8030a7fb11cSYuval Mintz 	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
8040a7fb11cSYuval Mintz 		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
8050a7fb11cSYuval Mintz 		   p_ll2_info->conn_type, tx_num_desc);
8060a7fb11cSYuval Mintz 
8070a7fb11cSYuval Mintz out:
8080a7fb11cSYuval Mintz 	if (rc)
8090a7fb11cSYuval Mintz 		DP_NOTICE(p_hwfn,
8100a7fb11cSYuval Mintz 			  "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
8110a7fb11cSYuval Mintz 			  tx_num_desc);
8120a7fb11cSYuval Mintz 	return rc;
8130a7fb11cSYuval Mintz }
8140a7fb11cSYuval Mintz 
8150a7fb11cSYuval Mintz int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
8160a7fb11cSYuval Mintz 			       struct qed_ll2_info *p_params,
8170a7fb11cSYuval Mintz 			       u16 rx_num_desc,
8180a7fb11cSYuval Mintz 			       u16 tx_num_desc,
8190a7fb11cSYuval Mintz 			       u8 *p_connection_handle)
8200a7fb11cSYuval Mintz {
8210a7fb11cSYuval Mintz 	qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
8220a7fb11cSYuval Mintz 	struct qed_ll2_info *p_ll2_info = NULL;
8230a7fb11cSYuval Mintz 	int rc;
8240a7fb11cSYuval Mintz 	u8 i;
8250a7fb11cSYuval Mintz 
8260a7fb11cSYuval Mintz 	if (!p_connection_handle || !p_hwfn->p_ll2_info)
8270a7fb11cSYuval Mintz 		return -EINVAL;
8280a7fb11cSYuval Mintz 
8290a7fb11cSYuval Mintz 	/* Find a free connection to be used */
8300a7fb11cSYuval Mintz 	for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
8310a7fb11cSYuval Mintz 		mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
8320a7fb11cSYuval Mintz 		if (p_hwfn->p_ll2_info[i].b_active) {
8330a7fb11cSYuval Mintz 			mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
8340a7fb11cSYuval Mintz 			continue;
8350a7fb11cSYuval Mintz 		}
8360a7fb11cSYuval Mintz 
8370a7fb11cSYuval Mintz 		p_hwfn->p_ll2_info[i].b_active = true;
8380a7fb11cSYuval Mintz 		p_ll2_info = &p_hwfn->p_ll2_info[i];
8390a7fb11cSYuval Mintz 		mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
8400a7fb11cSYuval Mintz 		break;
8410a7fb11cSYuval Mintz 	}
8420a7fb11cSYuval Mintz 	if (!p_ll2_info)
8430a7fb11cSYuval Mintz 		return -EBUSY;
8440a7fb11cSYuval Mintz 
8450a7fb11cSYuval Mintz 	p_ll2_info->conn_type = p_params->conn_type;
8460a7fb11cSYuval Mintz 	p_ll2_info->mtu = p_params->mtu;
8470a7fb11cSYuval Mintz 	p_ll2_info->rx_drop_ttl0_flg = p_params->rx_drop_ttl0_flg;
8480a7fb11cSYuval Mintz 	p_ll2_info->rx_vlan_removal_en = p_params->rx_vlan_removal_en;
8490a7fb11cSYuval Mintz 	p_ll2_info->tx_tc = p_params->tx_tc;
8500a7fb11cSYuval Mintz 	p_ll2_info->tx_dest = p_params->tx_dest;
8510a7fb11cSYuval Mintz 	p_ll2_info->ai_err_packet_too_big = p_params->ai_err_packet_too_big;
8520a7fb11cSYuval Mintz 	p_ll2_info->ai_err_no_buf = p_params->ai_err_no_buf;
853abd49676SRam Amrani 	p_ll2_info->gsi_enable = p_params->gsi_enable;
8540a7fb11cSYuval Mintz 
8550a7fb11cSYuval Mintz 	rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
8560a7fb11cSYuval Mintz 	if (rc)
8570a7fb11cSYuval Mintz 		goto q_allocate_fail;
8580a7fb11cSYuval Mintz 
8590a7fb11cSYuval Mintz 	rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc);
8600a7fb11cSYuval Mintz 	if (rc)
8610a7fb11cSYuval Mintz 		goto q_allocate_fail;
8620a7fb11cSYuval Mintz 
8630a7fb11cSYuval Mintz 	/* Register callbacks for the Rx/Tx queues */
8640a7fb11cSYuval Mintz 	comp_rx_cb = qed_ll2_rxq_completion;
8650a7fb11cSYuval Mintz 	comp_tx_cb = qed_ll2_txq_completion;
8660a7fb11cSYuval Mintz 
8670a7fb11cSYuval Mintz 	if (rx_num_desc) {
8680a7fb11cSYuval Mintz 		qed_int_register_cb(p_hwfn, comp_rx_cb,
8690a7fb11cSYuval Mintz 				    &p_hwfn->p_ll2_info[i],
8700a7fb11cSYuval Mintz 				    &p_ll2_info->rx_queue.rx_sb_index,
8710a7fb11cSYuval Mintz 				    &p_ll2_info->rx_queue.p_fw_cons);
8720a7fb11cSYuval Mintz 		p_ll2_info->rx_queue.b_cb_registred = true;
8730a7fb11cSYuval Mintz 	}
8740a7fb11cSYuval Mintz 
8750a7fb11cSYuval Mintz 	if (tx_num_desc) {
8760a7fb11cSYuval Mintz 		qed_int_register_cb(p_hwfn,
8770a7fb11cSYuval Mintz 				    comp_tx_cb,
8780a7fb11cSYuval Mintz 				    &p_hwfn->p_ll2_info[i],
8790a7fb11cSYuval Mintz 				    &p_ll2_info->tx_queue.tx_sb_index,
8800a7fb11cSYuval Mintz 				    &p_ll2_info->tx_queue.p_fw_cons);
8810a7fb11cSYuval Mintz 		p_ll2_info->tx_queue.b_cb_registred = true;
8820a7fb11cSYuval Mintz 	}
8830a7fb11cSYuval Mintz 
8840a7fb11cSYuval Mintz 	*p_connection_handle = i;
8850a7fb11cSYuval Mintz 	return rc;
8860a7fb11cSYuval Mintz 
8870a7fb11cSYuval Mintz q_allocate_fail:
8880a7fb11cSYuval Mintz 	qed_ll2_release_connection(p_hwfn, i);
8890a7fb11cSYuval Mintz 	return -ENOMEM;
8900a7fb11cSYuval Mintz }
8910a7fb11cSYuval Mintz 
8920a7fb11cSYuval Mintz static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
8930a7fb11cSYuval Mintz 					   struct qed_ll2_info *p_ll2_conn)
8940a7fb11cSYuval Mintz {
8950a7fb11cSYuval Mintz 	u8 action_on_error = 0;
8960a7fb11cSYuval Mintz 
8970a7fb11cSYuval Mintz 	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
8980a7fb11cSYuval Mintz 		return 0;
8990a7fb11cSYuval Mintz 
9000a7fb11cSYuval Mintz 	DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
9010a7fb11cSYuval Mintz 
9020a7fb11cSYuval Mintz 	SET_FIELD(action_on_error,
9030a7fb11cSYuval Mintz 		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
9040a7fb11cSYuval Mintz 		  p_ll2_conn->ai_err_packet_too_big);
9050a7fb11cSYuval Mintz 	SET_FIELD(action_on_error,
9060a7fb11cSYuval Mintz 		  CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->ai_err_no_buf);
9070a7fb11cSYuval Mintz 
9080a7fb11cSYuval Mintz 	return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
9090a7fb11cSYuval Mintz }
9100a7fb11cSYuval Mintz 
9110a7fb11cSYuval Mintz int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
9120a7fb11cSYuval Mintz {
9130a7fb11cSYuval Mintz 	struct qed_ll2_info *p_ll2_conn;
9140a7fb11cSYuval Mintz 	struct qed_ll2_rx_queue *p_rx;
9150a7fb11cSYuval Mintz 	struct qed_ll2_tx_queue *p_tx;
9160a7fb11cSYuval Mintz 	int rc = -EINVAL;
9170a7fb11cSYuval Mintz 	u32 i, capacity;
9180a7fb11cSYuval Mintz 	u8 qid;
9190a7fb11cSYuval Mintz 
9200a7fb11cSYuval Mintz 	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
9210a7fb11cSYuval Mintz 	if (!p_ll2_conn)
9220a7fb11cSYuval Mintz 		return -EINVAL;
9230a7fb11cSYuval Mintz 	p_rx = &p_ll2_conn->rx_queue;
9240a7fb11cSYuval Mintz 	p_tx = &p_ll2_conn->tx_queue;
9250a7fb11cSYuval Mintz 
9260a7fb11cSYuval Mintz 	qed_chain_reset(&p_rx->rxq_chain);
9270a7fb11cSYuval Mintz 	qed_chain_reset(&p_rx->rcq_chain);
9280a7fb11cSYuval Mintz 	INIT_LIST_HEAD(&p_rx->active_descq);
9290a7fb11cSYuval Mintz 	INIT_LIST_HEAD(&p_rx->free_descq);
9300a7fb11cSYuval Mintz 	INIT_LIST_HEAD(&p_rx->posting_descq);
9310a7fb11cSYuval Mintz 	spin_lock_init(&p_rx->lock);
9320a7fb11cSYuval Mintz 	capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
9330a7fb11cSYuval Mintz 	for (i = 0; i < capacity; i++)
9340a7fb11cSYuval Mintz 		list_add_tail(&p_rx->descq_array[i].list_entry,
9350a7fb11cSYuval Mintz 			      &p_rx->free_descq);
9360a7fb11cSYuval Mintz 	*p_rx->p_fw_cons = 0;
9370a7fb11cSYuval Mintz 
9380a7fb11cSYuval Mintz 	qed_chain_reset(&p_tx->txq_chain);
9390a7fb11cSYuval Mintz 	INIT_LIST_HEAD(&p_tx->active_descq);
9400a7fb11cSYuval Mintz 	INIT_LIST_HEAD(&p_tx->free_descq);
9410a7fb11cSYuval Mintz 	INIT_LIST_HEAD(&p_tx->sending_descq);
9420a7fb11cSYuval Mintz 	spin_lock_init(&p_tx->lock);
9430a7fb11cSYuval Mintz 	capacity = qed_chain_get_capacity(&p_tx->txq_chain);
9440a7fb11cSYuval Mintz 	for (i = 0; i < capacity; i++)
9450a7fb11cSYuval Mintz 		list_add_tail(&p_tx->descq_array[i].list_entry,
9460a7fb11cSYuval Mintz 			      &p_tx->free_descq);
9470a7fb11cSYuval Mintz 	p_tx->cur_completing_bd_idx = 0;
9480a7fb11cSYuval Mintz 	p_tx->bds_idx = 0;
9490a7fb11cSYuval Mintz 	p_tx->b_completing_packet = false;
9500a7fb11cSYuval Mintz 	p_tx->cur_send_packet = NULL;
9510a7fb11cSYuval Mintz 	p_tx->cur_send_frag_num = 0;
9520a7fb11cSYuval Mintz 	p_tx->cur_completing_frag_num = 0;
9530a7fb11cSYuval Mintz 	*p_tx->p_fw_cons = 0;
9540a7fb11cSYuval Mintz 
9550a7fb11cSYuval Mintz 	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
9560a7fb11cSYuval Mintz 
9570a7fb11cSYuval Mintz 	qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
9580a7fb11cSYuval Mintz 	p_ll2_conn->queue_id = qid;
9590a7fb11cSYuval Mintz 	p_ll2_conn->tx_stats_id = qid;
9600a7fb11cSYuval Mintz 	p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
9610a7fb11cSYuval Mintz 					    GTT_BAR0_MAP_REG_TSDM_RAM +
9620a7fb11cSYuval Mintz 					    TSTORM_LL2_RX_PRODS_OFFSET(qid);
9630a7fb11cSYuval Mintz 	p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
9640a7fb11cSYuval Mintz 					    qed_db_addr(p_ll2_conn->cid,
9650a7fb11cSYuval Mintz 							DQ_DEMS_LEGACY);
9660a7fb11cSYuval Mintz 
9670a7fb11cSYuval Mintz 	rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
9680a7fb11cSYuval Mintz 	if (rc)
9690a7fb11cSYuval Mintz 		return rc;
9700a7fb11cSYuval Mintz 
9710a7fb11cSYuval Mintz 	rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
9720a7fb11cSYuval Mintz 	if (rc)
9730a7fb11cSYuval Mintz 		return rc;
9740a7fb11cSYuval Mintz 
9750a7fb11cSYuval Mintz 	if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
9760a7fb11cSYuval Mintz 		qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);
9770a7fb11cSYuval Mintz 
9780a7fb11cSYuval Mintz 	return rc;
9790a7fb11cSYuval Mintz }
9800a7fb11cSYuval Mintz 
9810a7fb11cSYuval Mintz static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
9820a7fb11cSYuval Mintz 					     struct qed_ll2_rx_queue *p_rx,
9830a7fb11cSYuval Mintz 					     struct qed_ll2_rx_packet *p_curp)
9840a7fb11cSYuval Mintz {
9850a7fb11cSYuval Mintz 	struct qed_ll2_rx_packet *p_posting_packet = NULL;
9860a7fb11cSYuval Mintz 	struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
9870a7fb11cSYuval Mintz 	bool b_notify_fw = false;
9880a7fb11cSYuval Mintz 	u16 bd_prod, cq_prod;
9890a7fb11cSYuval Mintz 
9900a7fb11cSYuval Mintz 	/* This handles the flushing of already posted buffers */
9910a7fb11cSYuval Mintz 	while (!list_empty(&p_rx->posting_descq)) {
9920a7fb11cSYuval Mintz 		p_posting_packet = list_first_entry(&p_rx->posting_descq,
9930a7fb11cSYuval Mintz 						    struct qed_ll2_rx_packet,
9940a7fb11cSYuval Mintz 						    list_entry);
995b4f0fd4bSWei Yongjun 		list_move_tail(&p_posting_packet->list_entry,
9960a7fb11cSYuval Mintz 			       &p_rx->active_descq);
9970a7fb11cSYuval Mintz 		b_notify_fw = true;
9980a7fb11cSYuval Mintz 	}
9990a7fb11cSYuval Mintz 
10000a7fb11cSYuval Mintz 	/* This handles the supplied packet [if there is one] */
10010a7fb11cSYuval Mintz 	if (p_curp) {
10020a7fb11cSYuval Mintz 		list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
10030a7fb11cSYuval Mintz 		b_notify_fw = true;
10040a7fb11cSYuval Mintz 	}
10050a7fb11cSYuval Mintz 
10060a7fb11cSYuval Mintz 	if (!b_notify_fw)
10070a7fb11cSYuval Mintz 		return;
10080a7fb11cSYuval Mintz 
10090a7fb11cSYuval Mintz 	bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
10100a7fb11cSYuval Mintz 	cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
10110a7fb11cSYuval Mintz 	rx_prod.bd_prod = cpu_to_le16(bd_prod);
10120a7fb11cSYuval Mintz 	rx_prod.cqe_prod = cpu_to_le16(cq_prod);
10130a7fb11cSYuval Mintz 	DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
10140a7fb11cSYuval Mintz }
10150a7fb11cSYuval Mintz 
10160a7fb11cSYuval Mintz int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
10170a7fb11cSYuval Mintz 			   u8 connection_handle,
10180a7fb11cSYuval Mintz 			   dma_addr_t addr,
10190a7fb11cSYuval Mintz 			   u16 buf_len, void *cookie, u8 notify_fw)
10200a7fb11cSYuval Mintz {
10210a7fb11cSYuval Mintz 	struct core_rx_bd_with_buff_len *p_curb = NULL;
10220a7fb11cSYuval Mintz 	struct qed_ll2_rx_packet *p_curp = NULL;
10230a7fb11cSYuval Mintz 	struct qed_ll2_info *p_ll2_conn;
10240a7fb11cSYuval Mintz 	struct qed_ll2_rx_queue *p_rx;
10250a7fb11cSYuval Mintz 	unsigned long flags;
10260a7fb11cSYuval Mintz 	void *p_data;
10270a7fb11cSYuval Mintz 	int rc = 0;
10280a7fb11cSYuval Mintz 
10290a7fb11cSYuval Mintz 	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
10300a7fb11cSYuval Mintz 	if (!p_ll2_conn)
10310a7fb11cSYuval Mintz 		return -EINVAL;
10320a7fb11cSYuval Mintz 	p_rx = &p_ll2_conn->rx_queue;
10330a7fb11cSYuval Mintz 
10340a7fb11cSYuval Mintz 	spin_lock_irqsave(&p_rx->lock, flags);
10350a7fb11cSYuval Mintz 	if (!list_empty(&p_rx->free_descq))
10360a7fb11cSYuval Mintz 		p_curp = list_first_entry(&p_rx->free_descq,
10370a7fb11cSYuval Mintz 					  struct qed_ll2_rx_packet, list_entry);
10380a7fb11cSYuval Mintz 	if (p_curp) {
10390a7fb11cSYuval Mintz 		if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
10400a7fb11cSYuval Mintz 		    qed_chain_get_elem_left(&p_rx->rcq_chain)) {
10410a7fb11cSYuval Mintz 			p_data = qed_chain_produce(&p_rx->rxq_chain);
10420a7fb11cSYuval Mintz 			p_curb = (struct core_rx_bd_with_buff_len *)p_data;
10430a7fb11cSYuval Mintz 			qed_chain_produce(&p_rx->rcq_chain);
10440a7fb11cSYuval Mintz 		}
10450a7fb11cSYuval Mintz 	}
10460a7fb11cSYuval Mintz 
10470a7fb11cSYuval Mintz 	/* If we're lacking entires, let's try to flush buffers to FW */
10480a7fb11cSYuval Mintz 	if (!p_curp || !p_curb) {
10490a7fb11cSYuval Mintz 		rc = -EBUSY;
10500a7fb11cSYuval Mintz 		p_curp = NULL;
10510a7fb11cSYuval Mintz 		goto out_notify;
10520a7fb11cSYuval Mintz 	}
10530a7fb11cSYuval Mintz 
10540a7fb11cSYuval Mintz 	/* We have an Rx packet we can fill */
10550a7fb11cSYuval Mintz 	DMA_REGPAIR_LE(p_curb->addr, addr);
10560a7fb11cSYuval Mintz 	p_curb->buff_length = cpu_to_le16(buf_len);
10570a7fb11cSYuval Mintz 	p_curp->rx_buf_addr = addr;
10580a7fb11cSYuval Mintz 	p_curp->cookie = cookie;
10590a7fb11cSYuval Mintz 	p_curp->rxq_bd = p_curb;
10600a7fb11cSYuval Mintz 	p_curp->buf_length = buf_len;
10610a7fb11cSYuval Mintz 	list_del(&p_curp->list_entry);
10620a7fb11cSYuval Mintz 
10630a7fb11cSYuval Mintz 	/* Check if we only want to enqueue this packet without informing FW */
10640a7fb11cSYuval Mintz 	if (!notify_fw) {
10650a7fb11cSYuval Mintz 		list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
10660a7fb11cSYuval Mintz 		goto out;
10670a7fb11cSYuval Mintz 	}
10680a7fb11cSYuval Mintz 
10690a7fb11cSYuval Mintz out_notify:
10700a7fb11cSYuval Mintz 	qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
10710a7fb11cSYuval Mintz out:
10720a7fb11cSYuval Mintz 	spin_unlock_irqrestore(&p_rx->lock, flags);
10730a7fb11cSYuval Mintz 	return rc;
10740a7fb11cSYuval Mintz }
10750a7fb11cSYuval Mintz 
10760a7fb11cSYuval Mintz static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
10770a7fb11cSYuval Mintz 					  struct qed_ll2_tx_queue *p_tx,
10780a7fb11cSYuval Mintz 					  struct qed_ll2_tx_packet *p_curp,
10790a7fb11cSYuval Mintz 					  u8 num_of_bds,
10800a7fb11cSYuval Mintz 					  dma_addr_t first_frag,
10810a7fb11cSYuval Mintz 					  u16 first_frag_len, void *p_cookie,
10820a7fb11cSYuval Mintz 					  u8 notify_fw)
10830a7fb11cSYuval Mintz {
10840a7fb11cSYuval Mintz 	list_del(&p_curp->list_entry);
10850a7fb11cSYuval Mintz 	p_curp->cookie = p_cookie;
10860a7fb11cSYuval Mintz 	p_curp->bd_used = num_of_bds;
10870a7fb11cSYuval Mintz 	p_curp->notify_fw = notify_fw;
10880a7fb11cSYuval Mintz 	p_tx->cur_send_packet = p_curp;
10890a7fb11cSYuval Mintz 	p_tx->cur_send_frag_num = 0;
10900a7fb11cSYuval Mintz 
10910a7fb11cSYuval Mintz 	p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag;
10920a7fb11cSYuval Mintz 	p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len;
10930a7fb11cSYuval Mintz 	p_tx->cur_send_frag_num++;
10940a7fb11cSYuval Mintz }
10950a7fb11cSYuval Mintz 
10960a7fb11cSYuval Mintz static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
10970a7fb11cSYuval Mintz 					     struct qed_ll2_info *p_ll2,
10980a7fb11cSYuval Mintz 					     struct qed_ll2_tx_packet *p_curp,
10990a7fb11cSYuval Mintz 					     u8 num_of_bds,
11000a7fb11cSYuval Mintz 					     enum core_tx_dest tx_dest,
11010a7fb11cSYuval Mintz 					     u16 vlan,
11020a7fb11cSYuval Mintz 					     u8 bd_flags,
11030a7fb11cSYuval Mintz 					     u16 l4_hdr_offset_w,
1104abd49676SRam Amrani 					     enum core_roce_flavor_type type,
11050a7fb11cSYuval Mintz 					     dma_addr_t first_frag,
11060a7fb11cSYuval Mintz 					     u16 first_frag_len)
11070a7fb11cSYuval Mintz {
11080a7fb11cSYuval Mintz 	struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
11090a7fb11cSYuval Mintz 	u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
11100a7fb11cSYuval Mintz 	struct core_tx_bd *start_bd = NULL;
11110a7fb11cSYuval Mintz 	u16 frag_idx;
11120a7fb11cSYuval Mintz 
11130a7fb11cSYuval Mintz 	start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
11140a7fb11cSYuval Mintz 	start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
11150a7fb11cSYuval Mintz 	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
11160a7fb11cSYuval Mintz 		  cpu_to_le16(l4_hdr_offset_w));
11170a7fb11cSYuval Mintz 	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
11180a7fb11cSYuval Mintz 	start_bd->bd_flags.as_bitfield = bd_flags;
11190a7fb11cSYuval Mintz 	start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
11200a7fb11cSYuval Mintz 	    CORE_TX_BD_FLAGS_START_BD_SHIFT;
11210a7fb11cSYuval Mintz 	SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
11228d1d8fcbSRam Amrani 	SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
11230a7fb11cSYuval Mintz 	DMA_REGPAIR_LE(start_bd->addr, first_frag);
11240a7fb11cSYuval Mintz 	start_bd->nbytes = cpu_to_le16(first_frag_len);
11250a7fb11cSYuval Mintz 
11260a7fb11cSYuval Mintz 	DP_VERBOSE(p_hwfn,
11270a7fb11cSYuval Mintz 		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
11280a7fb11cSYuval Mintz 		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
11290a7fb11cSYuval Mintz 		   p_ll2->queue_id,
11300a7fb11cSYuval Mintz 		   p_ll2->cid,
11310a7fb11cSYuval Mintz 		   p_ll2->conn_type,
11320a7fb11cSYuval Mintz 		   prod_idx,
11330a7fb11cSYuval Mintz 		   first_frag_len,
11340a7fb11cSYuval Mintz 		   num_of_bds,
11350a7fb11cSYuval Mintz 		   le32_to_cpu(start_bd->addr.hi),
11360a7fb11cSYuval Mintz 		   le32_to_cpu(start_bd->addr.lo));
11370a7fb11cSYuval Mintz 
11380a7fb11cSYuval Mintz 	if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds)
11390a7fb11cSYuval Mintz 		return;
11400a7fb11cSYuval Mintz 
11410a7fb11cSYuval Mintz 	/* Need to provide the packet with additional BDs for frags */
11420a7fb11cSYuval Mintz 	for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
11430a7fb11cSYuval Mintz 	     frag_idx < num_of_bds; frag_idx++) {
11440a7fb11cSYuval Mintz 		struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
11450a7fb11cSYuval Mintz 
11460a7fb11cSYuval Mintz 		*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
11470a7fb11cSYuval Mintz 		(*p_bd)->bd_flags.as_bitfield = 0;
11480a7fb11cSYuval Mintz 		(*p_bd)->bitfield1 = 0;
11490a7fb11cSYuval Mintz 		(*p_bd)->bitfield0 = 0;
11500a7fb11cSYuval Mintz 		p_curp->bds_set[frag_idx].tx_frag = 0;
11510a7fb11cSYuval Mintz 		p_curp->bds_set[frag_idx].frag_len = 0;
11520a7fb11cSYuval Mintz 	}
11530a7fb11cSYuval Mintz }
11540a7fb11cSYuval Mintz 
11550a7fb11cSYuval Mintz /* This should be called while the Txq spinlock is being held */
11560a7fb11cSYuval Mintz static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
11570a7fb11cSYuval Mintz 				     struct qed_ll2_info *p_ll2_conn)
11580a7fb11cSYuval Mintz {
11590a7fb11cSYuval Mintz 	bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
11600a7fb11cSYuval Mintz 	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
11610a7fb11cSYuval Mintz 	struct qed_ll2_tx_packet *p_pkt = NULL;
11620a7fb11cSYuval Mintz 	struct core_db_data db_msg = { 0, 0, 0 };
11630a7fb11cSYuval Mintz 	u16 bd_prod;
11640a7fb11cSYuval Mintz 
11650a7fb11cSYuval Mintz 	/* If there are missing BDs, don't do anything now */
11660a7fb11cSYuval Mintz 	if (p_ll2_conn->tx_queue.cur_send_frag_num !=
11670a7fb11cSYuval Mintz 	    p_ll2_conn->tx_queue.cur_send_packet->bd_used)
11680a7fb11cSYuval Mintz 		return;
11690a7fb11cSYuval Mintz 
11700a7fb11cSYuval Mintz 	/* Push the current packet to the list and clean after it */
11710a7fb11cSYuval Mintz 	list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
11720a7fb11cSYuval Mintz 		      &p_ll2_conn->tx_queue.sending_descq);
11730a7fb11cSYuval Mintz 	p_ll2_conn->tx_queue.cur_send_packet = NULL;
11740a7fb11cSYuval Mintz 	p_ll2_conn->tx_queue.cur_send_frag_num = 0;
11750a7fb11cSYuval Mintz 
11760a7fb11cSYuval Mintz 	/* Notify FW of packet only if requested to */
11770a7fb11cSYuval Mintz 	if (!b_notify)
11780a7fb11cSYuval Mintz 		return;
11790a7fb11cSYuval Mintz 
11800a7fb11cSYuval Mintz 	bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);
11810a7fb11cSYuval Mintz 
11820a7fb11cSYuval Mintz 	while (!list_empty(&p_tx->sending_descq)) {
11830a7fb11cSYuval Mintz 		p_pkt = list_first_entry(&p_tx->sending_descq,
11840a7fb11cSYuval Mintz 					 struct qed_ll2_tx_packet, list_entry);
11850a7fb11cSYuval Mintz 		if (!p_pkt)
11860a7fb11cSYuval Mintz 			break;
11870a7fb11cSYuval Mintz 
1188b4f0fd4bSWei Yongjun 		list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
11890a7fb11cSYuval Mintz 	}
11900a7fb11cSYuval Mintz 
11910a7fb11cSYuval Mintz 	SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
11920a7fb11cSYuval Mintz 	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
11930a7fb11cSYuval Mintz 	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
11940a7fb11cSYuval Mintz 		  DQ_XCM_CORE_TX_BD_PROD_CMD);
11950a7fb11cSYuval Mintz 	db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
11960a7fb11cSYuval Mintz 	db_msg.spq_prod = cpu_to_le16(bd_prod);
11970a7fb11cSYuval Mintz 
11980a7fb11cSYuval Mintz 	/* Make sure the BDs data is updated before ringing the doorbell */
11990a7fb11cSYuval Mintz 	wmb();
12000a7fb11cSYuval Mintz 
12010a7fb11cSYuval Mintz 	DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));
12020a7fb11cSYuval Mintz 
12030a7fb11cSYuval Mintz 	DP_VERBOSE(p_hwfn,
12040a7fb11cSYuval Mintz 		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
12050a7fb11cSYuval Mintz 		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
12060a7fb11cSYuval Mintz 		   p_ll2_conn->queue_id,
12070a7fb11cSYuval Mintz 		   p_ll2_conn->cid, p_ll2_conn->conn_type, db_msg.spq_prod);
12080a7fb11cSYuval Mintz }
12090a7fb11cSYuval Mintz 
12100a7fb11cSYuval Mintz int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
12110a7fb11cSYuval Mintz 			      u8 connection_handle,
12120a7fb11cSYuval Mintz 			      u8 num_of_bds,
12130a7fb11cSYuval Mintz 			      u16 vlan,
12140a7fb11cSYuval Mintz 			      u8 bd_flags,
12150a7fb11cSYuval Mintz 			      u16 l4_hdr_offset_w,
1216abd49676SRam Amrani 			      enum qed_ll2_roce_flavor_type qed_roce_flavor,
12170a7fb11cSYuval Mintz 			      dma_addr_t first_frag,
12180a7fb11cSYuval Mintz 			      u16 first_frag_len, void *cookie, u8 notify_fw)
12190a7fb11cSYuval Mintz {
12200a7fb11cSYuval Mintz 	struct qed_ll2_tx_packet *p_curp = NULL;
12210a7fb11cSYuval Mintz 	struct qed_ll2_info *p_ll2_conn = NULL;
1222abd49676SRam Amrani 	enum core_roce_flavor_type roce_flavor;
12230a7fb11cSYuval Mintz 	struct qed_ll2_tx_queue *p_tx;
12240a7fb11cSYuval Mintz 	struct qed_chain *p_tx_chain;
12250a7fb11cSYuval Mintz 	unsigned long flags;
12260a7fb11cSYuval Mintz 	int rc = 0;
12270a7fb11cSYuval Mintz 
12280a7fb11cSYuval Mintz 	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
12290a7fb11cSYuval Mintz 	if (!p_ll2_conn)
12300a7fb11cSYuval Mintz 		return -EINVAL;
12310a7fb11cSYuval Mintz 	p_tx = &p_ll2_conn->tx_queue;
12320a7fb11cSYuval Mintz 	p_tx_chain = &p_tx->txq_chain;
12330a7fb11cSYuval Mintz 
12340a7fb11cSYuval Mintz 	if (num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
12350a7fb11cSYuval Mintz 		return -EIO;
12360a7fb11cSYuval Mintz 
12370a7fb11cSYuval Mintz 	spin_lock_irqsave(&p_tx->lock, flags);
12380a7fb11cSYuval Mintz 	if (p_tx->cur_send_packet) {
12390a7fb11cSYuval Mintz 		rc = -EEXIST;
12400a7fb11cSYuval Mintz 		goto out;
12410a7fb11cSYuval Mintz 	}
12420a7fb11cSYuval Mintz 
12430a7fb11cSYuval Mintz 	/* Get entry, but only if we have tx elements for it */
12440a7fb11cSYuval Mintz 	if (!list_empty(&p_tx->free_descq))
12450a7fb11cSYuval Mintz 		p_curp = list_first_entry(&p_tx->free_descq,
12460a7fb11cSYuval Mintz 					  struct qed_ll2_tx_packet, list_entry);
12470a7fb11cSYuval Mintz 	if (p_curp && qed_chain_get_elem_left(p_tx_chain) < num_of_bds)
12480a7fb11cSYuval Mintz 		p_curp = NULL;
12490a7fb11cSYuval Mintz 
12500a7fb11cSYuval Mintz 	if (!p_curp) {
12510a7fb11cSYuval Mintz 		rc = -EBUSY;
12520a7fb11cSYuval Mintz 		goto out;
12530a7fb11cSYuval Mintz 	}
12540a7fb11cSYuval Mintz 
1255abd49676SRam Amrani 	if (qed_roce_flavor == QED_LL2_ROCE) {
1256abd49676SRam Amrani 		roce_flavor = CORE_ROCE;
1257abd49676SRam Amrani 	} else if (qed_roce_flavor == QED_LL2_RROCE) {
1258abd49676SRam Amrani 		roce_flavor = CORE_RROCE;
1259abd49676SRam Amrani 	} else {
1260abd49676SRam Amrani 		rc = -EINVAL;
1261abd49676SRam Amrani 		goto out;
1262abd49676SRam Amrani 	}
1263abd49676SRam Amrani 
12640a7fb11cSYuval Mintz 	/* Prepare packet and BD, and perhaps send a doorbell to FW */
12650a7fb11cSYuval Mintz 	qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp,
12660a7fb11cSYuval Mintz 				      num_of_bds, first_frag,
12670a7fb11cSYuval Mintz 				      first_frag_len, cookie, notify_fw);
12680a7fb11cSYuval Mintz 	qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
12690a7fb11cSYuval Mintz 					 num_of_bds, CORE_TX_DEST_NW,
12700a7fb11cSYuval Mintz 					 vlan, bd_flags, l4_hdr_offset_w,
1271abd49676SRam Amrani 					 roce_flavor,
12720a7fb11cSYuval Mintz 					 first_frag, first_frag_len);
12730a7fb11cSYuval Mintz 
12740a7fb11cSYuval Mintz 	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
12750a7fb11cSYuval Mintz 
12760a7fb11cSYuval Mintz out:
12770a7fb11cSYuval Mintz 	spin_unlock_irqrestore(&p_tx->lock, flags);
12780a7fb11cSYuval Mintz 	return rc;
12790a7fb11cSYuval Mintz }
12800a7fb11cSYuval Mintz 
12810a7fb11cSYuval Mintz int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
12820a7fb11cSYuval Mintz 				      u8 connection_handle,
12830a7fb11cSYuval Mintz 				      dma_addr_t addr, u16 nbytes)
12840a7fb11cSYuval Mintz {
12850a7fb11cSYuval Mintz 	struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
12860a7fb11cSYuval Mintz 	struct qed_ll2_info *p_ll2_conn = NULL;
12870a7fb11cSYuval Mintz 	u16 cur_send_frag_num = 0;
12880a7fb11cSYuval Mintz 	struct core_tx_bd *p_bd;
12890a7fb11cSYuval Mintz 	unsigned long flags;
12900a7fb11cSYuval Mintz 
12910a7fb11cSYuval Mintz 	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
12920a7fb11cSYuval Mintz 	if (!p_ll2_conn)
12930a7fb11cSYuval Mintz 		return -EINVAL;
12940a7fb11cSYuval Mintz 
12950a7fb11cSYuval Mintz 	if (!p_ll2_conn->tx_queue.cur_send_packet)
12960a7fb11cSYuval Mintz 		return -EINVAL;
12970a7fb11cSYuval Mintz 
12980a7fb11cSYuval Mintz 	p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
12990a7fb11cSYuval Mintz 	cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
13000a7fb11cSYuval Mintz 
13010a7fb11cSYuval Mintz 	if (cur_send_frag_num >= p_cur_send_packet->bd_used)
13020a7fb11cSYuval Mintz 		return -EINVAL;
13030a7fb11cSYuval Mintz 
13040a7fb11cSYuval Mintz 	/* Fill the BD information, and possibly notify FW */
13050a7fb11cSYuval Mintz 	p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
13060a7fb11cSYuval Mintz 	DMA_REGPAIR_LE(p_bd->addr, addr);
13070a7fb11cSYuval Mintz 	p_bd->nbytes = cpu_to_le16(nbytes);
13080a7fb11cSYuval Mintz 	p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
13090a7fb11cSYuval Mintz 	p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
13100a7fb11cSYuval Mintz 
13110a7fb11cSYuval Mintz 	p_ll2_conn->tx_queue.cur_send_frag_num++;
13120a7fb11cSYuval Mintz 
13130a7fb11cSYuval Mintz 	spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
13140a7fb11cSYuval Mintz 	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
13150a7fb11cSYuval Mintz 	spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);
13160a7fb11cSYuval Mintz 
13170a7fb11cSYuval Mintz 	return 0;
13180a7fb11cSYuval Mintz }
13190a7fb11cSYuval Mintz 
13200a7fb11cSYuval Mintz int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
13210a7fb11cSYuval Mintz {
13220a7fb11cSYuval Mintz 	struct qed_ll2_info *p_ll2_conn = NULL;
13230a7fb11cSYuval Mintz 	int rc = -EINVAL;
13240a7fb11cSYuval Mintz 
13250a7fb11cSYuval Mintz 	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
13260a7fb11cSYuval Mintz 	if (!p_ll2_conn)
13270a7fb11cSYuval Mintz 		return -EINVAL;
13280a7fb11cSYuval Mintz 
13290a7fb11cSYuval Mintz 	/* Stop Tx & Rx of connection, if needed */
13300a7fb11cSYuval Mintz 	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
13310a7fb11cSYuval Mintz 		rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
13320a7fb11cSYuval Mintz 		if (rc)
13330a7fb11cSYuval Mintz 			return rc;
13340a7fb11cSYuval Mintz 		qed_ll2_txq_flush(p_hwfn, connection_handle);
13350a7fb11cSYuval Mintz 	}
13360a7fb11cSYuval Mintz 
13370a7fb11cSYuval Mintz 	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
13380a7fb11cSYuval Mintz 		rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
13390a7fb11cSYuval Mintz 		if (rc)
13400a7fb11cSYuval Mintz 			return rc;
13410a7fb11cSYuval Mintz 		qed_ll2_rxq_flush(p_hwfn, connection_handle);
13420a7fb11cSYuval Mintz 	}
13430a7fb11cSYuval Mintz 
13440a7fb11cSYuval Mintz 	return rc;
13450a7fb11cSYuval Mintz }
13460a7fb11cSYuval Mintz 
/* Release all resources of an LL2 connection: unregister its status-block
 * callbacks, free descriptor arrays and chains, release the CID, and mark
 * the connection slot free for reuse.  Intended to be called after the
 * connection has been terminated.
 */
void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	/* Detach interrupt callbacks first so no completion handler can run
	 * while the queue resources below are being freed.
	 */
	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->rx_queue.b_cb_registred = false;
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
	}

	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->tx_queue.b_cb_registred = false;
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
	}

	kfree(p_ll2_conn->tx_queue.descq_array);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);

	kfree(p_ll2_conn->rx_queue.descq_array);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);

	qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);

	/* Clear b_active under the mutex so a concurrent acquire sees a
	 * consistent slot state.
	 */
	mutex_lock(&p_ll2_conn->mutex);
	p_ll2_conn->b_active = false;
	mutex_unlock(&p_ll2_conn->mutex);
}
13780a7fb11cSYuval Mintz 
13790a7fb11cSYuval Mintz struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn)
13800a7fb11cSYuval Mintz {
13810a7fb11cSYuval Mintz 	struct qed_ll2_info *p_ll2_connections;
13820a7fb11cSYuval Mintz 	u8 i;
13830a7fb11cSYuval Mintz 
13840a7fb11cSYuval Mintz 	/* Allocate LL2's set struct */
13850a7fb11cSYuval Mintz 	p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
13860a7fb11cSYuval Mintz 				    sizeof(struct qed_ll2_info), GFP_KERNEL);
13870a7fb11cSYuval Mintz 	if (!p_ll2_connections) {
13880a7fb11cSYuval Mintz 		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
13890a7fb11cSYuval Mintz 		return NULL;
13900a7fb11cSYuval Mintz 	}
13910a7fb11cSYuval Mintz 
13920a7fb11cSYuval Mintz 	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
13930a7fb11cSYuval Mintz 		p_ll2_connections[i].my_id = i;
13940a7fb11cSYuval Mintz 
13950a7fb11cSYuval Mintz 	return p_ll2_connections;
13960a7fb11cSYuval Mintz }
13970a7fb11cSYuval Mintz 
13980a7fb11cSYuval Mintz void qed_ll2_setup(struct qed_hwfn *p_hwfn,
13990a7fb11cSYuval Mintz 		   struct qed_ll2_info *p_ll2_connections)
14000a7fb11cSYuval Mintz {
14010a7fb11cSYuval Mintz 	int i;
14020a7fb11cSYuval Mintz 
14030a7fb11cSYuval Mintz 	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
14040a7fb11cSYuval Mintz 		mutex_init(&p_ll2_connections[i].mutex);
14050a7fb11cSYuval Mintz }
14060a7fb11cSYuval Mintz 
/* Counterpart of qed_ll2_alloc() - free the LL2 connections array. */
void qed_ll2_free(struct qed_hwfn *p_hwfn,
		  struct qed_ll2_info *p_ll2_connections)
{
	kfree(p_ll2_connections);
}
14120a7fb11cSYuval Mintz 
14130a7fb11cSYuval Mintz static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
14140a7fb11cSYuval Mintz 				struct qed_ptt *p_ptt,
14150a7fb11cSYuval Mintz 				struct qed_ll2_info *p_ll2_conn,
14160a7fb11cSYuval Mintz 				struct qed_ll2_stats *p_stats)
14170a7fb11cSYuval Mintz {
14180a7fb11cSYuval Mintz 	struct core_ll2_tstorm_per_queue_stat tstats;
14190a7fb11cSYuval Mintz 	u8 qid = p_ll2_conn->queue_id;
14200a7fb11cSYuval Mintz 	u32 tstats_addr;
14210a7fb11cSYuval Mintz 
14220a7fb11cSYuval Mintz 	memset(&tstats, 0, sizeof(tstats));
14230a7fb11cSYuval Mintz 	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
14240a7fb11cSYuval Mintz 		      CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
14250a7fb11cSYuval Mintz 	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
14260a7fb11cSYuval Mintz 
14270a7fb11cSYuval Mintz 	p_stats->packet_too_big_discard =
14280a7fb11cSYuval Mintz 			HILO_64_REGPAIR(tstats.packet_too_big_discard);
14290a7fb11cSYuval Mintz 	p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
14300a7fb11cSYuval Mintz }
14310a7fb11cSYuval Mintz 
14320a7fb11cSYuval Mintz static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
14330a7fb11cSYuval Mintz 				struct qed_ptt *p_ptt,
14340a7fb11cSYuval Mintz 				struct qed_ll2_info *p_ll2_conn,
14350a7fb11cSYuval Mintz 				struct qed_ll2_stats *p_stats)
14360a7fb11cSYuval Mintz {
14370a7fb11cSYuval Mintz 	struct core_ll2_ustorm_per_queue_stat ustats;
14380a7fb11cSYuval Mintz 	u8 qid = p_ll2_conn->queue_id;
14390a7fb11cSYuval Mintz 	u32 ustats_addr;
14400a7fb11cSYuval Mintz 
14410a7fb11cSYuval Mintz 	memset(&ustats, 0, sizeof(ustats));
14420a7fb11cSYuval Mintz 	ustats_addr = BAR0_MAP_REG_USDM_RAM +
14430a7fb11cSYuval Mintz 		      CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
14440a7fb11cSYuval Mintz 	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
14450a7fb11cSYuval Mintz 
14460a7fb11cSYuval Mintz 	p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
14470a7fb11cSYuval Mintz 	p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
14480a7fb11cSYuval Mintz 	p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
14490a7fb11cSYuval Mintz 	p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
14500a7fb11cSYuval Mintz 	p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
14510a7fb11cSYuval Mintz 	p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
14520a7fb11cSYuval Mintz }
14530a7fb11cSYuval Mintz 
14540a7fb11cSYuval Mintz static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
14550a7fb11cSYuval Mintz 				struct qed_ptt *p_ptt,
14560a7fb11cSYuval Mintz 				struct qed_ll2_info *p_ll2_conn,
14570a7fb11cSYuval Mintz 				struct qed_ll2_stats *p_stats)
14580a7fb11cSYuval Mintz {
14590a7fb11cSYuval Mintz 	struct core_ll2_pstorm_per_queue_stat pstats;
14600a7fb11cSYuval Mintz 	u8 stats_id = p_ll2_conn->tx_stats_id;
14610a7fb11cSYuval Mintz 	u32 pstats_addr;
14620a7fb11cSYuval Mintz 
14630a7fb11cSYuval Mintz 	memset(&pstats, 0, sizeof(pstats));
14640a7fb11cSYuval Mintz 	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
14650a7fb11cSYuval Mintz 		      CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
14660a7fb11cSYuval Mintz 	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
14670a7fb11cSYuval Mintz 
14680a7fb11cSYuval Mintz 	p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
14690a7fb11cSYuval Mintz 	p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
14700a7fb11cSYuval Mintz 	p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
14710a7fb11cSYuval Mintz 	p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
14720a7fb11cSYuval Mintz 	p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
14730a7fb11cSYuval Mintz 	p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
14740a7fb11cSYuval Mintz }
14750a7fb11cSYuval Mintz 
14760a7fb11cSYuval Mintz int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
14770a7fb11cSYuval Mintz 		      u8 connection_handle, struct qed_ll2_stats *p_stats)
14780a7fb11cSYuval Mintz {
14790a7fb11cSYuval Mintz 	struct qed_ll2_info *p_ll2_conn = NULL;
14800a7fb11cSYuval Mintz 	struct qed_ptt *p_ptt;
14810a7fb11cSYuval Mintz 
14820a7fb11cSYuval Mintz 	memset(p_stats, 0, sizeof(*p_stats));
14830a7fb11cSYuval Mintz 
14840a7fb11cSYuval Mintz 	if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
14850a7fb11cSYuval Mintz 	    !p_hwfn->p_ll2_info)
14860a7fb11cSYuval Mintz 		return -EINVAL;
14870a7fb11cSYuval Mintz 
14880a7fb11cSYuval Mintz 	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
14890a7fb11cSYuval Mintz 
14900a7fb11cSYuval Mintz 	p_ptt = qed_ptt_acquire(p_hwfn);
14910a7fb11cSYuval Mintz 	if (!p_ptt) {
14920a7fb11cSYuval Mintz 		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
14930a7fb11cSYuval Mintz 		return -EINVAL;
14940a7fb11cSYuval Mintz 	}
14950a7fb11cSYuval Mintz 
14960a7fb11cSYuval Mintz 	_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
14970a7fb11cSYuval Mintz 	_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
14980a7fb11cSYuval Mintz 	if (p_ll2_conn->tx_stats_en)
14990a7fb11cSYuval Mintz 		_qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
15000a7fb11cSYuval Mintz 
15010a7fb11cSYuval Mintz 	qed_ptt_release(p_hwfn, p_ptt);
15020a7fb11cSYuval Mintz 	return 0;
15030a7fb11cSYuval Mintz }
15040a7fb11cSYuval Mintz 
/* Store the protocol driver's completion callbacks and opaque cookie;
 * these are invoked from the LL2 Rx/Tx completion paths.
 */
static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
				    const struct qed_ll2_cb_ops *ops,
				    void *cookie)
{
	cdev->ll2->cbs = ops;
	cdev->ll2->cb_cookie = cookie;
}
15120a7fb11cSYuval Mintz 
/* Bring up the device's common LL2 connection: allocate a pool of Rx
 * buffers, acquire and establish the connection on the leading hwfn, post
 * the Rx buffers to FW, and install an LLH filter for the requested MAC.
 * Returns 0 on success or -EINVAL on any failure; partially-created state
 * is unwound through the goto ladder at the bottom.
 */
static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
	struct qed_ll2_info ll2_info;
	struct qed_ll2_buffer *buffer, *tmp_buffer;
	enum qed_ll2_conn_type conn_type;
	struct qed_ptt *p_ptt;
	int rc, i;

	/* Initialize LL2 locks & lists */
	INIT_LIST_HEAD(&cdev->ll2->list);
	spin_lock_init(&cdev->ll2->lock);
	/* Rx buffer size: skb headroom + Ethernet header + cache-line
	 * alignment slack + MTU.
	 */
	cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
			     L1_CACHE_BYTES + params->mtu;
	cdev->ll2->frags_mapped = params->frags_mapped;

	/*Allocate memory for LL2 */
	DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
		cdev->ll2->rx_size);
	for (i = 0; i < QED_LL2_RX_SIZE; i++) {
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
			goto fail;
		}

		rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
					  &buffer->phys_addr);
		if (rc) {
			kfree(buffer);
			goto fail;
		}

		list_add_tail(&buffer->list, &cdev->ll2->list);
	}

	/* Derive the connection type from the device personality */
	switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
	case QED_PCI_ISCSI:
		conn_type = QED_LL2_TYPE_ISCSI;
		break;
	case QED_PCI_ETH_ROCE:
		conn_type = QED_LL2_TYPE_ROCE;
		break;
	default:
		conn_type = QED_LL2_TYPE_TEST;
	}

	/* Prepare the temporary ll2 information */
	memset(&ll2_info, 0, sizeof(ll2_info));
	ll2_info.conn_type = conn_type;
	ll2_info.mtu = params->mtu;
	ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
	ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
	ll2_info.tx_tc = 0;
	ll2_info.tx_dest = CORE_TX_DEST_NW;
	ll2_info.gsi_enable = 1;

	rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
					QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
					&cdev->ll2->handle);
	if (rc) {
		DP_INFO(cdev, "Failed to acquire LL2 connection\n");
		goto fail;
	}

	rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
					  cdev->ll2->handle);
	if (rc) {
		DP_INFO(cdev, "Failed to establish LL2 connection\n");
		goto release_fail;
	}

	/* Post all Rx buffers to FW */
	spin_lock_bh(&cdev->ll2->lock);
	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
		rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
					    cdev->ll2->handle,
					    buffer->phys_addr, 0, buffer, 1);
		if (rc) {
			/* A posting failure is non-fatal; the buffer is
			 * simply removed from the pool.
			 */
			DP_INFO(cdev,
				"Failed to post an Rx buffer; Deleting it\n");
			dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
					 cdev->ll2->rx_size, DMA_FROM_DEVICE);
			kfree(buffer->data);
			list_del(&buffer->list);
			kfree(buffer);
		} else {
			cdev->ll2->rx_cnt++;
		}
	}
	spin_unlock_bh(&cdev->ll2->lock);

	/* The connection is useless without at least one posted Rx buffer */
	if (!cdev->ll2->rx_cnt) {
		DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
		goto release_terminate;
	}

	if (!is_valid_ether_addr(params->ll2_mac_address)) {
		DP_INFO(cdev, "Invalid Ethernet address\n");
		goto release_terminate;
	}

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_INFO(cdev, "Failed to acquire PTT\n");
		goto release_terminate;
	}

	/* Steer traffic for this MAC to the LL2 connection */
	rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
				    params->ll2_mac_address);
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
	if (rc) {
		DP_ERR(cdev, "Failed to allocate LLH filter\n");
		goto release_terminate_all;
	}

	ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);

	return 0;

release_terminate_all:

release_terminate:
	qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
release_fail:
	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
fail:
	qed_ll2_kill_buffers(cdev);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
	return -EINVAL;
}
16430a7fb11cSYuval Mintz 
16440a7fb11cSYuval Mintz static int qed_ll2_stop(struct qed_dev *cdev)
16450a7fb11cSYuval Mintz {
16460a7fb11cSYuval Mintz 	struct qed_ptt *p_ptt;
16470a7fb11cSYuval Mintz 	int rc;
16480a7fb11cSYuval Mintz 
16490a7fb11cSYuval Mintz 	if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
16500a7fb11cSYuval Mintz 		return 0;
16510a7fb11cSYuval Mintz 
16520a7fb11cSYuval Mintz 	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
16530a7fb11cSYuval Mintz 	if (!p_ptt) {
16540a7fb11cSYuval Mintz 		DP_INFO(cdev, "Failed to acquire PTT\n");
16550a7fb11cSYuval Mintz 		goto fail;
16560a7fb11cSYuval Mintz 	}
16570a7fb11cSYuval Mintz 
16580a7fb11cSYuval Mintz 	qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
16590a7fb11cSYuval Mintz 				  cdev->ll2_mac_address);
16600a7fb11cSYuval Mintz 	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
16610a7fb11cSYuval Mintz 	eth_zero_addr(cdev->ll2_mac_address);
16620a7fb11cSYuval Mintz 
16630a7fb11cSYuval Mintz 	rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
16640a7fb11cSYuval Mintz 					  cdev->ll2->handle);
16650a7fb11cSYuval Mintz 	if (rc)
16660a7fb11cSYuval Mintz 		DP_INFO(cdev, "Failed to terminate LL2 connection\n");
16670a7fb11cSYuval Mintz 
16680a7fb11cSYuval Mintz 	qed_ll2_kill_buffers(cdev);
16690a7fb11cSYuval Mintz 
16700a7fb11cSYuval Mintz 	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
16710a7fb11cSYuval Mintz 	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
16720a7fb11cSYuval Mintz 
16730a7fb11cSYuval Mintz 	return rc;
16740a7fb11cSYuval Mintz fail:
16750a7fb11cSYuval Mintz 	return -EINVAL;
16760a7fb11cSYuval Mintz }
16770a7fb11cSYuval Mintz 
16780a7fb11cSYuval Mintz static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
16790a7fb11cSYuval Mintz {
16800a7fb11cSYuval Mintz 	const skb_frag_t *frag;
16810a7fb11cSYuval Mintz 	int rc = -EINVAL, i;
16820a7fb11cSYuval Mintz 	dma_addr_t mapping;
16830a7fb11cSYuval Mintz 	u16 vlan = 0;
16840a7fb11cSYuval Mintz 	u8 flags = 0;
16850a7fb11cSYuval Mintz 
16860a7fb11cSYuval Mintz 	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
16870a7fb11cSYuval Mintz 		DP_INFO(cdev, "Cannot transmit a checksumed packet\n");
16880a7fb11cSYuval Mintz 		return -EINVAL;
16890a7fb11cSYuval Mintz 	}
16900a7fb11cSYuval Mintz 
16910a7fb11cSYuval Mintz 	if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
16920a7fb11cSYuval Mintz 		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
16930a7fb11cSYuval Mintz 		       1 + skb_shinfo(skb)->nr_frags);
16940a7fb11cSYuval Mintz 		return -EINVAL;
16950a7fb11cSYuval Mintz 	}
16960a7fb11cSYuval Mintz 
16970a7fb11cSYuval Mintz 	mapping = dma_map_single(&cdev->pdev->dev, skb->data,
16980a7fb11cSYuval Mintz 				 skb->len, DMA_TO_DEVICE);
16990a7fb11cSYuval Mintz 	if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
17000a7fb11cSYuval Mintz 		DP_NOTICE(cdev, "SKB mapping failed\n");
17010a7fb11cSYuval Mintz 		return -EINVAL;
17020a7fb11cSYuval Mintz 	}
17030a7fb11cSYuval Mintz 
17040a7fb11cSYuval Mintz 	/* Request HW to calculate IP csum */
17050a7fb11cSYuval Mintz 	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
17060a7fb11cSYuval Mintz 	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
17070a7fb11cSYuval Mintz 		flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
17080a7fb11cSYuval Mintz 
17090a7fb11cSYuval Mintz 	if (skb_vlan_tag_present(skb)) {
17100a7fb11cSYuval Mintz 		vlan = skb_vlan_tag_get(skb);
17110a7fb11cSYuval Mintz 		flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
17120a7fb11cSYuval Mintz 	}
17130a7fb11cSYuval Mintz 
17140a7fb11cSYuval Mintz 	rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
17150a7fb11cSYuval Mintz 				       cdev->ll2->handle,
17160a7fb11cSYuval Mintz 				       1 + skb_shinfo(skb)->nr_frags,
1717abd49676SRam Amrani 				       vlan, flags, 0, 0 /* RoCE FLAVOR */,
1718abd49676SRam Amrani 				       mapping, skb->len, skb, 1);
17190a7fb11cSYuval Mintz 	if (rc)
17200a7fb11cSYuval Mintz 		goto err;
17210a7fb11cSYuval Mintz 
17220a7fb11cSYuval Mintz 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
17230a7fb11cSYuval Mintz 		frag = &skb_shinfo(skb)->frags[i];
17240a7fb11cSYuval Mintz 		if (!cdev->ll2->frags_mapped) {
17250a7fb11cSYuval Mintz 			mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
17260a7fb11cSYuval Mintz 						   skb_frag_size(frag),
17270a7fb11cSYuval Mintz 						   DMA_TO_DEVICE);
17280a7fb11cSYuval Mintz 
17290a7fb11cSYuval Mintz 			if (unlikely(dma_mapping_error(&cdev->pdev->dev,
17300a7fb11cSYuval Mintz 						       mapping))) {
17310a7fb11cSYuval Mintz 				DP_NOTICE(cdev,
17320a7fb11cSYuval Mintz 					  "Unable to map frag - dropping packet\n");
17330a7fb11cSYuval Mintz 				goto err;
17340a7fb11cSYuval Mintz 			}
17350a7fb11cSYuval Mintz 		} else {
17360a7fb11cSYuval Mintz 			mapping = page_to_phys(skb_frag_page(frag)) |
17370a7fb11cSYuval Mintz 			    frag->page_offset;
17380a7fb11cSYuval Mintz 		}
17390a7fb11cSYuval Mintz 
17400a7fb11cSYuval Mintz 		rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
17410a7fb11cSYuval Mintz 						       cdev->ll2->handle,
17420a7fb11cSYuval Mintz 						       mapping,
17430a7fb11cSYuval Mintz 						       skb_frag_size(frag));
17440a7fb11cSYuval Mintz 
17450a7fb11cSYuval Mintz 		/* if failed not much to do here, partial packet has been posted
17460a7fb11cSYuval Mintz 		 * we can't free memory, will need to wait for completion.
17470a7fb11cSYuval Mintz 		 */
17480a7fb11cSYuval Mintz 		if (rc)
17490a7fb11cSYuval Mintz 			goto err2;
17500a7fb11cSYuval Mintz 	}
17510a7fb11cSYuval Mintz 
17520a7fb11cSYuval Mintz 	return 0;
17530a7fb11cSYuval Mintz 
17540a7fb11cSYuval Mintz err:
17550a7fb11cSYuval Mintz 	dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
17560a7fb11cSYuval Mintz 
17570a7fb11cSYuval Mintz err2:
17580a7fb11cSYuval Mintz 	return rc;
17590a7fb11cSYuval Mintz }
17600a7fb11cSYuval Mintz 
17610a7fb11cSYuval Mintz static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
17620a7fb11cSYuval Mintz {
17630a7fb11cSYuval Mintz 	if (!cdev->ll2)
17640a7fb11cSYuval Mintz 		return -EINVAL;
17650a7fb11cSYuval Mintz 
17660a7fb11cSYuval Mintz 	return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
17670a7fb11cSYuval Mintz 				 cdev->ll2->handle, stats);
17680a7fb11cSYuval Mintz }
17690a7fb11cSYuval Mintz 
17700a7fb11cSYuval Mintz const struct qed_ll2_ops qed_ll2_ops_pass = {
17710a7fb11cSYuval Mintz 	.start = &qed_ll2_start,
17720a7fb11cSYuval Mintz 	.stop = &qed_ll2_stop,
17730a7fb11cSYuval Mintz 	.start_xmit = &qed_ll2_start_xmit,
17740a7fb11cSYuval Mintz 	.register_cb_ops = &qed_ll2_register_cb_ops,
17750a7fb11cSYuval Mintz 	.get_stats = &qed_ll2_stats,
17760a7fb11cSYuval Mintz };
17770a7fb11cSYuval Mintz 
17780a7fb11cSYuval Mintz int qed_ll2_alloc_if(struct qed_dev *cdev)
17790a7fb11cSYuval Mintz {
17800a7fb11cSYuval Mintz 	cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
17810a7fb11cSYuval Mintz 	return cdev->ll2 ? 0 : -ENOMEM;
17820a7fb11cSYuval Mintz }
17830a7fb11cSYuval Mintz 
/* Counterpart of qed_ll2_alloc_if() - free the per-device LL2 structure. */
void qed_ll2_dealloc_if(struct qed_dev *cdev)
{
	kfree(cdev->ll2);
	cdev->ll2 = NULL;	/* guards against double-free on a repeat call */
}
1789