1aba53d5dSSunil Goutham // SPDX-License-Identifier: GPL-2.0
2c7cd6c5aSSunil Goutham /* Marvell RVU Admin Function driver
3aba53d5dSSunil Goutham *
4c7cd6c5aSSunil Goutham * Copyright (C) 2018 Marvell.
5aba53d5dSSunil Goutham *
6aba53d5dSSunil Goutham */
7aba53d5dSSunil Goutham
8aba53d5dSSunil Goutham #include <linux/module.h>
9aba53d5dSSunil Goutham #include <linux/pci.h>
10aba53d5dSSunil Goutham
11aba53d5dSSunil Goutham #include "rvu_struct.h"
12aba53d5dSSunil Goutham #include "rvu_reg.h"
13aba53d5dSSunil Goutham #include "rvu.h"
146b3321baSSunil Goutham #include "npc.h"
1500efd99eSNithin Dabilpuram #include "mcs.h"
16aba53d5dSSunil Goutham #include "cgx.h"
171845ada4SRakesh Babu #include "lmac_common.h"
18d6c9784bSRatheesh Kannoth #include "rvu_npc_hash.h"
19aba53d5dSSunil Goutham
209a946defSVamsi Attunuru static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
2127150bc4SGeetha sowjanya static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
2227150bc4SGeetha sowjanya int type, int chan_id);
23967db352SNaveen Mamindlapalli static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
24967db352SNaveen Mamindlapalli int type, bool add);
25e8e095b3SSunil Goutham static int nix_setup_ipolicers(struct rvu *rvu,
26e8e095b3SSunil Goutham struct nix_hw *nix_hw, int blkaddr);
2707cccffdSGeetha sowjanya static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
28e8e095b3SSunil Goutham static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
29e8e095b3SSunil Goutham struct nix_hw *nix_hw, u16 pcifunc);
30e8e095b3SSunil Goutham static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
31e8e095b3SSunil Goutham static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
32e8e095b3SSunil Goutham u32 leaf_prof);
3314e94f94SHariprasad Kelam static const char *nix_get_ctx_name(int ctype);
344b05528eSSunil Goutham
/* Multicast replication table size selectors; each value names the
 * number of table entries (256 up to 64K).
 */
enum mc_tbl_sz {
	MC_TBL_SZ_256,
	MC_TBL_SZ_512,
	MC_TBL_SZ_1K,
	MC_TBL_SZ_2K,
	MC_TBL_SZ_4K,
	MC_TBL_SZ_8K,
	MC_TBL_SZ_16K,
	MC_TBL_SZ_32K,
	MC_TBL_SZ_64K,
};
4652d3d327SSunil Goutham
/* Multicast buffer count selectors; each value names the number of
 * buffers (8 up to 2048).
 */
enum mc_buf_cnt {
	MC_BUF_CNT_8,
	MC_BUF_CNT_16,
	MC_BUF_CNT_32,
	MC_BUF_CNT_64,
	MC_BUF_CNT_128,
	MC_BUF_CNT_256,
	MC_BUF_CNT_512,
	MC_BUF_CNT_1024,
	MC_BUF_CNT_2048,
};
5852d3d327SSunil Goutham
/* Indexes of the packet marking format configurations: one per
 * (field, color) combination for IP DSCP, IP ECN and VLAN DEI marking.
 *
 * NOTE(review): "makr" looks like a typo for "mark"; renaming the enum
 * would touch users elsewhere in the driver, so it is left as-is.
 */
enum nix_makr_fmt_indexes {
	NIX_MARK_CFG_IP_DSCP_RED,
	NIX_MARK_CFG_IP_DSCP_YELLOW,
	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
	NIX_MARK_CFG_IP_ECN_RED,
	NIX_MARK_CFG_IP_ECN_YELLOW,
	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
	NIX_MARK_CFG_VLAN_DEI_RED,
	NIX_MARK_CFG_VLAN_DEI_YELLOW,
	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
	NIX_MARK_CFG_MAX,
};
71a27d7659SKrzysztof Kanas
/* For now, consider only the MC resources needed for broadcast
 * packet replication, i.e. 256 HWVFs + 12 PFs.
 */
7552d3d327SSunil Goutham #define MC_TBL_SIZE MC_TBL_SZ_512
7652d3d327SSunil Goutham #define MC_BUF_CNT MC_BUF_CNT_128
7752d3d327SSunil Goutham
/* Node of a multicast/broadcast replication list */
struct mce {
	struct hlist_node node;
	u16 pcifunc;	/* PF_FUNC this list entry refers to */
};
8252d3d327SSunil Goutham
rvu_get_next_nix_blkaddr(struct rvu * rvu,int blkaddr)83221f3dffSRakesh Babu int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
84221f3dffSRakesh Babu {
85221f3dffSRakesh Babu int i = 0;
86221f3dffSRakesh Babu
87221f3dffSRakesh Babu /*If blkaddr is 0, return the first nix block address*/
88221f3dffSRakesh Babu if (blkaddr == 0)
89221f3dffSRakesh Babu return rvu->nix_blkaddr[blkaddr];
90221f3dffSRakesh Babu
91221f3dffSRakesh Babu while (i + 1 < MAX_NIX_BLKS) {
92221f3dffSRakesh Babu if (rvu->nix_blkaddr[i] == blkaddr)
93221f3dffSRakesh Babu return rvu->nix_blkaddr[i + 1];
94221f3dffSRakesh Babu i++;
95221f3dffSRakesh Babu }
96221f3dffSRakesh Babu
97221f3dffSRakesh Babu return 0;
98221f3dffSRakesh Babu }
99221f3dffSRakesh Babu
is_nixlf_attached(struct rvu * rvu,u16 pcifunc)100f9274958SSunil Goutham bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
101f9274958SSunil Goutham {
102f9274958SSunil Goutham struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
103f9274958SSunil Goutham int blkaddr;
104f9274958SSunil Goutham
105f9274958SSunil Goutham blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
106f9274958SSunil Goutham if (!pfvf->nixlf || blkaddr < 0)
107f9274958SSunil Goutham return false;
108f9274958SSunil Goutham return true;
109f9274958SSunil Goutham }
110f9274958SSunil Goutham
rvu_get_nixlf_count(struct rvu * rvu)111fefefd99SSunil Goutham int rvu_get_nixlf_count(struct rvu *rvu)
112fefefd99SSunil Goutham {
113221f3dffSRakesh Babu int blkaddr = 0, max = 0;
114fefefd99SSunil Goutham struct rvu_block *block;
115fefefd99SSunil Goutham
116221f3dffSRakesh Babu blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
117221f3dffSRakesh Babu while (blkaddr) {
118fefefd99SSunil Goutham block = &rvu->hw->block[blkaddr];
119221f3dffSRakesh Babu max += block->lf.max;
120221f3dffSRakesh Babu blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
121221f3dffSRakesh Babu }
122221f3dffSRakesh Babu return max;
123fefefd99SSunil Goutham }
124fefefd99SSunil Goutham
/* Look up the NIX LF index (and optionally the NIX block address) for a
 * PF_FUNC. Returns 0 on success, NIX_AF_ERR_AF_LF_INVALID when the
 * PF_FUNC has no LF attached or no valid block mapping.
 */
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	int addr;

	/* The PF/VF must have an LF attached and a valid block mapping */
	addr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || addr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nixlf = rvu_get_lf(rvu, &hw->block[addr], pcifunc, 0);
	if (*nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Block address is an optional out-parameter */
	if (nix_blkaddr)
		*nix_blkaddr = addr;

	return 0;
}
14452ccbdacSSunil Goutham
/* Resolve both the NIX block address and its software context for a
 * PF_FUNC. Returns 0 on success, NIX_AF_ERR_AF_LF_INVALID when no LF is
 * attached, or NIX_AF_ERR_INVALID_NIXBLK when the block has no context.
 */
int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
			struct nix_hw **nix_hw, int *blkaddr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* Which NIX block is this PF/VF attached to? */
	*blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || *blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* And the per-block software state that goes with it */
	*nix_hw = get_nix_hw(rvu->hw, *blkaddr);
	if (!*nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	return 0;
}
160967db352SNaveen Mamindlapalli
nix_mce_list_init(struct nix_mce_list * list,int max)16152d3d327SSunil Goutham static void nix_mce_list_init(struct nix_mce_list *list, int max)
16252d3d327SSunil Goutham {
16352d3d327SSunil Goutham INIT_HLIST_HEAD(&list->head);
16452d3d327SSunil Goutham list->count = 0;
16552d3d327SSunil Goutham list->max = max;
16652d3d327SSunil Goutham }
16752d3d327SSunil Goutham
nix_alloc_mce_list(struct nix_mcast * mcast,int count)16852d3d327SSunil Goutham static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
16952d3d327SSunil Goutham {
17052d3d327SSunil Goutham int idx;
17152d3d327SSunil Goutham
17252d3d327SSunil Goutham if (!mcast)
17352d3d327SSunil Goutham return 0;
17452d3d327SSunil Goutham
17552d3d327SSunil Goutham idx = mcast->next_free_mce;
17652d3d327SSunil Goutham mcast->next_free_mce += count;
17752d3d327SSunil Goutham return idx;
17852d3d327SSunil Goutham }
17952d3d327SSunil Goutham
get_nix_hw(struct rvu_hwinfo * hw,int blkaddr)180221f3dffSRakesh Babu struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
181ffb0abd7SSunil Goutham {
182221f3dffSRakesh Babu int nix_blkaddr = 0, i = 0;
183221f3dffSRakesh Babu struct rvu *rvu = hw->rvu;
184ffb0abd7SSunil Goutham
185221f3dffSRakesh Babu nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
186221f3dffSRakesh Babu while (nix_blkaddr) {
187221f3dffSRakesh Babu if (blkaddr == nix_blkaddr && hw->nix)
188221f3dffSRakesh Babu return &hw->nix[i];
189221f3dffSRakesh Babu nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
190221f3dffSRakesh Babu i++;
191221f3dffSRakesh Babu }
192ffb0abd7SSunil Goutham return NULL;
193ffb0abd7SSunil Goutham }
194ffb0abd7SSunil Goutham
nix_get_dwrr_mtu_reg(struct rvu_hwinfo * hw,int smq_link_type)195bbba125eSSunil Goutham int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type)
196bbba125eSSunil Goutham {
197bbba125eSSunil Goutham if (hw->cap.nix_multiple_dwrr_mtu)
198bbba125eSSunil Goutham return NIX_AF_DWRR_MTUX(smq_link_type);
199bbba125eSSunil Goutham
200bbba125eSSunil Goutham if (smq_link_type == SMQ_LINK_TYPE_SDP)
201bbba125eSSunil Goutham return NIX_AF_DWRR_SDP_MTU;
202bbba125eSSunil Goutham
203bbba125eSSunil Goutham /* Here it's same reg for RPM and LBK */
204bbba125eSSunil Goutham return NIX_AF_DWRR_RPM_MTU;
205bbba125eSSunil Goutham }
206bbba125eSSunil Goutham
convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)20776660df2SSunil Goutham u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
20876660df2SSunil Goutham {
20976660df2SSunil Goutham dwrr_mtu &= 0x1FULL;
21076660df2SSunil Goutham
21176660df2SSunil Goutham /* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
21276660df2SSunil Goutham * Value of 4 is reserved for MTU value of 9728 bytes.
21376660df2SSunil Goutham * Value of 5 is reserved for MTU value of 10240 bytes.
21476660df2SSunil Goutham */
21576660df2SSunil Goutham switch (dwrr_mtu) {
21676660df2SSunil Goutham case 4:
21776660df2SSunil Goutham return 9728;
21876660df2SSunil Goutham case 5:
21976660df2SSunil Goutham return 10240;
22076660df2SSunil Goutham default:
22176660df2SSunil Goutham return BIT_ULL(dwrr_mtu);
22276660df2SSunil Goutham }
22376660df2SSunil Goutham
22476660df2SSunil Goutham return 0;
22576660df2SSunil Goutham }
22676660df2SSunil Goutham
/* Convert an MTU in bytes to the 5-bit hardware DWRR MTU encoding.
 *
 * The encoding is the power-of-2 exponent of the size, except that
 * 9728 and 10240 bytes map to the reserved encodings 4 and 5. Sizes
 * above 64K bytes are out of range and yield 0.
 *
 * Fix: the original had an unreachable 'return 0;' after a switch in
 * which every path already returns; the dead statement is removed.
 */
u32 convert_bytes_to_dwrr_mtu(u32 bytes)
{
	if (bytes > BIT_ULL(16))
		return 0;

	switch (bytes) {
	case 9728:
		return 4;
	case 10240:
		return 5;
	default:
		return ilog2(bytes);
	}
}
24776660df2SSunil Goutham
/* Flush all in-flight NIX RX packets to LLC/DRAM before RX queues are
 * torn down. Issues NIX_AF_RX_SW_SYNC and polls for completion; the
 * whole sequence is performed twice to work around a HW errata (see the
 * comment below). Failures are logged but not propagated.
 */
static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
	int err;

	/* Sync all in flight RX packets to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");

	/* SW_SYNC ensures all existing transactions are finished and pkts
	 * are written to LLC/DRAM, queues should be teared down after
	 * successful SW_SYNC. Due to a HW errata, in some rare scenarios
	 * an existing transaction might end after SW_SYNC operation. To
	 * ensure operation is fully done, do the SW_SYNC twice.
	 */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
}
269c554f9c1SGeetha sowjanya
is_valid_txschq(struct rvu * rvu,int blkaddr,int lvl,u16 pcifunc,u16 schq)270ffb0abd7SSunil Goutham static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
271ffb0abd7SSunil Goutham int lvl, u16 pcifunc, u16 schq)
272ffb0abd7SSunil Goutham {
2735d9b976dSSunil Goutham struct rvu_hwinfo *hw = rvu->hw;
274ffb0abd7SSunil Goutham struct nix_txsch *txsch;
275ffb0abd7SSunil Goutham struct nix_hw *nix_hw;
27626dda7daSNithin Dabilpuram u16 map_func;
277ffb0abd7SSunil Goutham
278ffb0abd7SSunil Goutham nix_hw = get_nix_hw(rvu->hw, blkaddr);
279ffb0abd7SSunil Goutham if (!nix_hw)
280ffb0abd7SSunil Goutham return false;
281ffb0abd7SSunil Goutham
282ffb0abd7SSunil Goutham txsch = &nix_hw->txsch[lvl];
283ffb0abd7SSunil Goutham /* Check out of bounds */
284ffb0abd7SSunil Goutham if (schq >= txsch->schq.max)
285ffb0abd7SSunil Goutham return false;
286ffb0abd7SSunil Goutham
2870964fc8fSStanislaw Kardach mutex_lock(&rvu->rsrc_lock);
28826dda7daSNithin Dabilpuram map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
2890964fc8fSStanislaw Kardach mutex_unlock(&rvu->rsrc_lock);
29026dda7daSNithin Dabilpuram
2915d9b976dSSunil Goutham /* TLs aggegating traffic are shared across PF and VFs */
2925d9b976dSSunil Goutham if (lvl >= hw->cap.nix_tx_aggr_lvl) {
2935d9b976dSSunil Goutham if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
294ffb0abd7SSunil Goutham return false;
2955d9b976dSSunil Goutham else
2965d9b976dSSunil Goutham return true;
2975d9b976dSSunil Goutham }
29826dda7daSNithin Dabilpuram
2995d9b976dSSunil Goutham if (map_func != pcifunc)
30026dda7daSNithin Dabilpuram return false;
30126dda7daSNithin Dabilpuram
302ffb0abd7SSunil Goutham return true;
303ffb0abd7SSunil Goutham }
304ffb0abd7SSunil Goutham
/* Per-interface RX-side setup for a newly attached NIX LF.
 *
 * Depending on the interface type (CGX, LBK loopback or SDP), this
 * resolves the LF's RX/TX channel base and count, reports the TX link
 * index in @rsp->tx_link, and installs the NPC MCAM entries (unicast,
 * broadcast match, and promisc for LBK/SDP) so the LF can receive
 * traffic. @loop flips the chosen LBK link pair (see the comment in the
 * LBK case). Returns 0 on success or a negative errno.
 */
static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
			      struct nix_lf_alloc_rsp *rsp, bool loop)
{
	struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
	u16 req_chan_base, req_chan_end, req_chan_cnt;
	struct rvu_hwinfo *hw = rvu->hw;
	struct sdp_node_info *sdp_info;
	int pkind, pf, vf, lbkid, vfid;
	u8 cgx_id, lmac_id;
	bool from_vf;
	int err;

	/* Nothing to do for PFs that are neither CGX mapped nor LBK/SDP */
	pf = rvu_get_pf(pcifunc);
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
	    type != NIX_INTF_TYPE_SDP)
		return 0;

	switch (type) {
	case NIX_INTF_TYPE_CGX:
		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);

		pkind = rvu_npc_get_pkind(rvu, pf);
		if (pkind < 0) {
			dev_err(rvu->dev,
				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
			return -EINVAL;
		}
		/* One RX and one TX channel, both at the LMAC's channel 0 */
		pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;

		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
		rvu_npc_set_pkind(rvu, pkind, pfvf);

		break;
	case NIX_INTF_TYPE_LBK:
		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;

		/* If NIX1 block is present on the silicon then NIXes are
		 * assigned alternatively for lbk interfaces. NIX0 should
		 * send packets on lbk link 1 channels and NIX1 should send
		 * on lbk link 0 channels for the communication between
		 * NIX0 and NIX1.
		 */
		lbkid = 0;
		if (rvu->hw->lbk_links > 1)
			lbkid = vf & 0x1 ? 0 : 1;

		/* By default NIX0 is configured to send packet on lbk link 1
		 * (which corresponds to LBK1), same packet will receive on
		 * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0
		 * (which corresponds to LBK2) packet will receive on NIX0 lbk
		 * link 1.
		 * But if lbk links for NIX0 and NIX1 are negated, i.e NIX0
		 * transmits and receives on lbk link 0, which corresponds
		 * to LBK1 block, back to back connectivity between NIX and
		 * LBK can be achieved (which is similar to 96xx)
		 *
		 *			RX		TX
		 * NIX0 lbk link	1 (LBK2)	1 (LBK1)
		 * NIX0 lbk link	0 (LBK0)	0 (LBK0)
		 * NIX1 lbk link	0 (LBK1)	0 (LBK2)
		 * NIX1 lbk link	1 (LBK3)	1 (LBK3)
		 */
		if (loop)
			lbkid = !lbkid;

		/* Note that AF's VFs work in pairs and talk over consecutive
		 * loopback channels.Therefore if odd number of AF VFs are
		 * enabled then the last VF remains with no pair.
		 */
		pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
		pfvf->tx_chan_base = vf & 0x1 ?
					rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
					rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rsp->tx_link = hw->cgx_links + lbkid;
		pfvf->lbkid = lbkid;
		rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);

		break;
	case NIX_INTF_TYPE_SDP:
		from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
		parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
		sdp_info = parent_pf->sdp_info;
		if (!sdp_info) {
			dev_err(rvu->dev, "Invalid sdp_info pointer\n");
			return -EINVAL;
		}
		if (from_vf) {
			/* VF rings follow the PF rings; walk past the rings
			 * of all lower-numbered VFs to find this VF's base.
			 */
			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
					sdp_info->num_pf_rings;
			vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
			for (vfid = 0; vfid < vf; vfid++)
				req_chan_base += sdp_info->vf_rings[vfid];
			req_chan_cnt = sdp_info->vf_rings[vf];
			req_chan_end = req_chan_base + req_chan_cnt - 1;
			if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
			    req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
				dev_err(rvu->dev,
					"PF_Func 0x%x: Invalid channel base and count\n",
					pcifunc);
				return -EINVAL;
			}
		} else {
			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
			req_chan_cnt = sdp_info->num_pf_rings;
		}

		pfvf->rx_chan_base = req_chan_base;
		pfvf->rx_chan_cnt = req_chan_cnt;
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;

		rsp->tx_link = hw->cgx_links + hw->lbk_links;
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);
		break;
	}

	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
	 * RVU PF/VF's MAC address.
	 */
	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
				    pfvf->rx_chan_base, pfvf->mac_addr);

	/* Add this PF_FUNC to bcast pkt replication list */
	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to enable PF_FUNC 0x%x\n",
			pcifunc);
		return err;
	}
	/* Install MCAM rule matching Ethernet broadcast mac address */
	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
					  nixlf, pfvf->rx_chan_base);

	pfvf->maxlen = NIC_HW_MIN_FRS;
	pfvf->minlen = NIC_HW_MIN_FRS;

	return 0;
}
45694d942c5SGeetha sowjanya
/* Undo the RX-side setup done by nix_interface_init() when a NIX LF is
 * detached: reset frame-length state, drop the PF_FUNC from the
 * broadcast replication list, and remove its MCAM entries and DMAC
 * filters. Errors from the bcast-list update are logged, not returned.
 */
static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int err;

	pfvf->maxlen = 0;
	pfvf->minlen = 0;

	/* Remove this PF_FUNC from bcast pkt replication list */
	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to disable PF_FUNC 0x%x\n",
			pcifunc);
	}

	/* Free and disable any MCAM entries used by this NIX LF */
	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);

	/* Disable DMAC filters used */
	rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}
4794b05528eSSunil Goutham
/* Mbox handler: disable backpressure on the requested range of RX
 * channels of the sender's NIX LF by clearing bit 16 (the bit set by
 * the corresponding bp_enable handler) in each channel's
 * NIX_AF_RX_CHANX_CFG register.
 */
int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
				    struct nix_bp_cfg_req *req,
				    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int blkaddr, pf, type;
	u16 chan_base, chan;
	u64 cfg;

	/* Only CGX mapped PFs and LBK interfaces are handled */
	pf = rvu_get_pf(pcifunc);
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	/* req->chan_base is relative to the LF's first RX channel */
	chan_base = pfvf->rx_chan_base + req->chan_base;
	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg & ~BIT_ULL(16));
	}
	return 0;
}
50627150bc4SGeetha sowjanya
/* Compute the backpressure ID (BPID) for the chan_id'th channel of the
 * sender's requested range on a CGX, LBK or SDP interface.
 *
 * The global BPID space is carved up per link type (see the
 * range-division comment below); per-link channel counts come from the
 * NIX_AF_CONST* registers. Returns the BPID, or -EINVAL when the
 * requested channel range falls outside the link type's BPID range.
 */
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id)
{
	int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt;
	u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_pfvf *pfvf;
	u8 cgx_id, lmac_id;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	lmac_chan_cnt = cfg & 0xFF;

	cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
	lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	sdp_chan_cnt = cfg & 0xFFF;
	sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;

	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	/* Backpressure IDs range division
	 * CGX channels are mapped to (0 - 191) BPIDs
	 * LBK channels are mapped to (192 - 255) BPIDs
	 * SDP channels are mapped to (256 - 511) BPIDs
	 *
	 * Lmac channels and bpids mapped as follows
	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
	 */
	switch (type) {
	case NIX_INTF_TYPE_CGX:
		if ((req->chan_base + req->chan_cnt) > 16)
			return -EINVAL;
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
		/* Assign bpid based on cgx, lmac and chan id */
		bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
			(lmac_id * lmac_chan_cnt) + req->chan_base;

		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > cgx_bpid_cnt)
			return -EINVAL;
		break;

	case NIX_INTF_TYPE_LBK:
		if ((req->chan_base + req->chan_cnt) > 63)
			return -EINVAL;
		bpid = cgx_bpid_cnt + req->chan_base;
		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
			return -EINVAL;
		break;
	case NIX_INTF_TYPE_SDP:
		if ((req->chan_base + req->chan_cnt) > 255)
			return -EINVAL;

		bpid = sdp_bpid_cnt + req->chan_base;
		if (req->bpid_per_chan)
			bpid += chan_id;

		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	return bpid;
}
58027150bc4SGeetha sowjanya
/* Mbox handler: enable NIX RX backpressure on a PF/VF's RX channels.
 *
 * For each requested channel, a backpressure ID (BPID) is looked up via
 * rvu_nix_get_bpid() and programmed into NIX_AF_RX_CHANX_CFG (BPID in
 * bits [8:0], backpressure enable in bit 16). The channel->BPID mapping
 * is returned to the requester in @rsp->chan_bpid[].
 *
 * Returns 0 on success (including the silent no-op for PFs that are not
 * CGX/LBK/SDP mapped), -EINVAL if no BPID could be allocated.
 */
int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
				   struct nix_bp_cfg_req *req,
				   struct nix_bp_cfg_rsp *rsp)
{
	int blkaddr, pf, type, chan_id = 0;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	u16 chan_base, chan;
	s16 bpid, bpid_base;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	/* AF's VFs work over the LBK interface, everything else over CGX,
	 * unless the PF/VF is SDP mapped.
	 */
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (is_sdp_pfvf(pcifunc))
		type = NIX_INTF_TYPE_SDP;

	/* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
	    type != NIX_INTF_TYPE_SDP)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	/* NOTE(review): rvu_get_blkaddr() can return a negative value; it is
	 * used below without a check — presumably a NIX LF is guaranteed to
	 * be attached for CGX/LBK/SDP mapped functions. Confirm.
	 */
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	/* First BPID for this request; also the base reported back in rsp */
	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
	chan_base = pfvf->rx_chan_base + req->chan_base;
	bpid = bpid_base;

	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		/* Negative bpid means rvu_nix_get_bpid() ran out of range */
		if (bpid < 0) {
			dev_warn(rvu->dev, "Fail to enable backpressure\n");
			return -EINVAL;
		}

		/* Replace BPID bits [8:0] and set BP enable (bit 16) */
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		cfg &= ~GENMASK_ULL(8, 0);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
		chan_id++;
		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
	}

	for (chan = 0; chan < req->chan_cnt; chan++) {
		/* Map channel and bpid assign to it */
		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
				       (bpid_base & 0x3FF);
		/* BPIDs are consecutive only in per-channel mode */
		if (req->bpid_per_chan)
			bpid_base++;
	}
	rsp->chan_cnt = req->chan_cnt;

	return 0;
}
63427150bc4SGeetha sowjanya
/* Program the outer-L3 LSO fields of format @format, consuming field
 * slots starting at *@fidx (advanced for every field written).
 * @v4 selects IPv4 (length + ID fields) vs IPv6 (length field only).
 */
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
				 u64 format, bool v4, u64 *fidx)
{
	struct nix_lso_format fld;

	/* IP total/payload length: add segment payload length.
	 * The length field sits at byte offset 2 in IPv4, 4 in IPv6.
	 */
	fld = (struct nix_lso_format) {
		.layer	= NIX_TXLAYER_OL3,
		.offset	= v4 ? 2 : 4,
		.sizem1	= 1,	/* field is 2 bytes wide */
		.alg	= NIX_LSOALG_ADD_PAYLEN,
	};
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&fld);

	/* IPv6 carries no identification field, so we are done */
	if (!v4)
		return;

	/* IPv4 ID: incremented by the segment number */
	fld = (struct nix_lso_format) {
		.layer	= NIX_TXLAYER_OL3,
		.offset	= 4,
		.sizem1	= 1,	/* 2 bytes */
		.alg	= NIX_LSOALG_ADD_SEGNUM,
	};
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&fld);
}
66359360e98SSunil Goutham
/* Program the outer-L4 (TCP) LSO fields of format @format, consuming
 * field slots starting at *@fidx (advanced for every field written).
 */
static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
				 u64 format, u64 *fidx)
{
	struct nix_lso_format fld;

	/* TCP sequence number: advanced by the running payload offset */
	fld = (struct nix_lso_format) {
		.layer	= NIX_TXLAYER_OL4,
		.offset	= 4,
		.sizem1	= 3,	/* 4 bytes */
		.alg	= NIX_LSOALG_ADD_OFFSET,
	};
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&fld);

	/* TCP flags word: adjusted per segment by HW */
	fld = (struct nix_lso_format) {
		.layer	= NIX_TXLAYER_OL4,
		.offset	= 12,
		.sizem1	= 1,	/* 2 bytes */
		.alg	= NIX_LSOALG_TCP_FLAGS,
	};
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&fld);
}
68759360e98SSunil Goutham
nix_setup_lso(struct rvu * rvu,struct nix_hw * nix_hw,int blkaddr)688da5d32e1SNithin Dabilpuram static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
68959360e98SSunil Goutham {
69059360e98SSunil Goutham u64 cfg, idx, fidx = 0;
69159360e98SSunil Goutham
692da5d32e1SNithin Dabilpuram /* Get max HW supported format indices */
693da5d32e1SNithin Dabilpuram cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
694da5d32e1SNithin Dabilpuram nix_hw->lso.total = cfg;
695da5d32e1SNithin Dabilpuram
69659360e98SSunil Goutham /* Enable LSO */
69759360e98SSunil Goutham cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
69859360e98SSunil Goutham /* For TSO, set first and middle segment flags to
69959360e98SSunil Goutham * mask out PSH, RST & FIN flags in TCP packet
70059360e98SSunil Goutham */
70159360e98SSunil Goutham cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
70259360e98SSunil Goutham cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
70359360e98SSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
70459360e98SSunil Goutham
705da5d32e1SNithin Dabilpuram /* Setup default static LSO formats
706da5d32e1SNithin Dabilpuram *
707da5d32e1SNithin Dabilpuram * Configure format fields for TCPv4 segmentation offload
708da5d32e1SNithin Dabilpuram */
70959360e98SSunil Goutham idx = NIX_LSO_FORMAT_IDX_TSOV4;
71059360e98SSunil Goutham nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
71159360e98SSunil Goutham nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
71259360e98SSunil Goutham
71359360e98SSunil Goutham /* Set rest of the fields to NOP */
71459360e98SSunil Goutham for (; fidx < 8; fidx++) {
71559360e98SSunil Goutham rvu_write64(rvu, blkaddr,
71659360e98SSunil Goutham NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
71759360e98SSunil Goutham }
718da5d32e1SNithin Dabilpuram nix_hw->lso.in_use++;
71959360e98SSunil Goutham
72059360e98SSunil Goutham /* Configure format fields for TCPv6 segmentation offload */
72159360e98SSunil Goutham idx = NIX_LSO_FORMAT_IDX_TSOV6;
72259360e98SSunil Goutham fidx = 0;
72359360e98SSunil Goutham nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
72459360e98SSunil Goutham nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
72559360e98SSunil Goutham
72659360e98SSunil Goutham /* Set rest of the fields to NOP */
72759360e98SSunil Goutham for (; fidx < 8; fidx++) {
72859360e98SSunil Goutham rvu_write64(rvu, blkaddr,
72959360e98SSunil Goutham NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
73059360e98SSunil Goutham }
731da5d32e1SNithin Dabilpuram nix_hw->lso.in_use++;
73259360e98SSunil Goutham }
73359360e98SSunil Goutham
nix_ctx_free(struct rvu * rvu,struct rvu_pfvf * pfvf)734cb30711aSSunil Goutham static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
735cb30711aSSunil Goutham {
736557dd485SGeetha sowjanya kfree(pfvf->rq_bmap);
737557dd485SGeetha sowjanya kfree(pfvf->sq_bmap);
738557dd485SGeetha sowjanya kfree(pfvf->cq_bmap);
739cb30711aSSunil Goutham if (pfvf->rq_ctx)
740cb30711aSSunil Goutham qmem_free(rvu->dev, pfvf->rq_ctx);
741cb30711aSSunil Goutham if (pfvf->sq_ctx)
742cb30711aSSunil Goutham qmem_free(rvu->dev, pfvf->sq_ctx);
743cb30711aSSunil Goutham if (pfvf->cq_ctx)
744cb30711aSSunil Goutham qmem_free(rvu->dev, pfvf->cq_ctx);
745cb30711aSSunil Goutham if (pfvf->rss_ctx)
746cb30711aSSunil Goutham qmem_free(rvu->dev, pfvf->rss_ctx);
747cb30711aSSunil Goutham if (pfvf->nix_qints_ctx)
748cb30711aSSunil Goutham qmem_free(rvu->dev, pfvf->nix_qints_ctx);
749cb30711aSSunil Goutham if (pfvf->cq_ints_ctx)
750cb30711aSSunil Goutham qmem_free(rvu->dev, pfvf->cq_ints_ctx);
751cb30711aSSunil Goutham
752557dd485SGeetha sowjanya pfvf->rq_bmap = NULL;
753557dd485SGeetha sowjanya pfvf->cq_bmap = NULL;
754557dd485SGeetha sowjanya pfvf->sq_bmap = NULL;
755cb30711aSSunil Goutham pfvf->rq_ctx = NULL;
756cb30711aSSunil Goutham pfvf->sq_ctx = NULL;
757cb30711aSSunil Goutham pfvf->cq_ctx = NULL;
758cb30711aSSunil Goutham pfvf->rss_ctx = NULL;
759cb30711aSSunil Goutham pfvf->nix_qints_ctx = NULL;
760cb30711aSSunil Goutham pfvf->cq_ints_ctx = NULL;
761cb30711aSSunil Goutham }
762cb30711aSSunil Goutham
/* Allocate and program the RSS indirection table for a NIX LF.
 *
 * Allocates rss_sz * rss_grps entries of @hwctx_size bytes, programs the
 * table base, the RSS config (enable, caching, way mask, optional
 * tag-LSB-as-adder) and each group's offset/size.
 *
 * Returns 0 on success (or when RSS was not requested), or the
 * qmem_alloc() error.
 */
static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
			      struct rvu_pfvf *pfvf, int nixlf,
			      int rss_sz, int rss_grps, int hwctx_size,
			      u64 way_mask, bool tag_lsb_as_adder)
{
	int num_indices, grp, err;
	u64 rss_cfg;

	/* Nothing to do when this NIXLF did not ask for RSS */
	if (!rss_sz)
		return 0;

	num_indices = rss_sz * rss_grps;

	/* Alloc NIX RSS HW context memory and config the base */
	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
	if (err)
		return err;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
		    (u64)pfvf->rss_ctx->iova);

	/* Config full RSS table size, enable RSS and caching */
	rss_cfg = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 |
		  ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE);
	if (tag_lsb_as_adder)
		rss_cfg |= BIT_ULL(5);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), rss_cfg);

	/* Config RSS group offset and sizes */
	for (grp = 0; grp < rss_grps; grp++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));

	return 0;
}
798cb30711aSSunil Goutham
/* Submit one instruction to the NIX admin queue and busy-wait (up to
 * ~1000us) for HW to post a completion code.
 *
 * The single result buffer (aq->res->base) is shared across submissions,
 * so callers must serialize access — rvu_nix_blk_aq_enq_inst() does this
 * under aq->lock.
 *
 * Returns 0 on NIX_AQ_COMP_GOOD, -EBUSY on timeout or any other
 * completion code.
 */
static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
			       struct nix_aq_inst_s *inst)
{
	struct admin_queue *aq = block->aq;
	struct nix_aq_res_s *result;
	int timeout = 1000;
	u64 reg, head;
	int ret;

	result = (struct nix_aq_res_s *)aq->res->base;

	/* Get current head pointer where to append this instruction */
	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
	head = (reg >> 4) & AQ_PTR_MASK;

	/* Copy the instruction in at head and clear the result slot so the
	 * completion poll below starts from a known state.
	 */
	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
	       (void *)inst, aq->inst->entry_sz);
	memset(result, 0, sizeof(*result));
	/* sync into memory */
	wmb();

	/* Ring the doorbell and wait for result */
	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout)
			return -EBUSY;
	}

	if (result->compcode != NIX_AQ_COMP_GOOD) {
		/* TODO: Replace this with some error code */
		/* Fault/lock/poison completions can leave NDC cachelines
		 * locked; attempt to recover them on all four NIX NDCs
		 * before reporting the failure.
		 */
		if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
		    result->compcode == NIX_AQ_COMP_LOCKERR ||
		    result->compcode == NIX_AQ_COMP_CTX_POISON) {
			ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX);
			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX);
			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX);
			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX);
			if (ret)
				dev_err(rvu->dev,
					"%s: Not able to unlock cachelines\n", __func__);
		}

		return -EBUSY;
	}

	return 0;
}
849ffb0abd7SSunil Goutham
/* Extract the SQ's SMQ value and its write-mask from an AQ request,
 * accounting for the differing request layouts of OcteonTx2 and CN10K
 * silicon.
 */
static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req,
			       u16 *smq, u16 *smq_mask)
{
	struct nix_cn10k_aq_enq_req *aq_req;

	if (is_rvu_otx2(rvu)) {
		/* OcteonTx2: the request already has the native layout */
		*smq = req->sq.smq;
		*smq_mask = req->sq_mask.smq;
		return;
	}

	/* CN10K: reinterpret the request with the cn10k context layout */
	aq_req = (struct nix_cn10k_aq_enq_req *)req;
	*smq = aq_req->sq.smq;
	*smq_mask = aq_req->sq_mask.smq;
}
86429fe7a1bSGeetha sowjanya
/* Validate and execute one NIX AQ operation (INIT/WRITE/READ/NOP/
 * LOCK/UNLOCK) on behalf of a PF/VF for the NIX block @nix_hw.
 *
 * Checks that the requester owns the target context, stages the
 * context/mask into the shared result buffer under aq->lock, submits
 * the instruction, mirrors queue-enable state into the pfvf bitmaps,
 * and for READ copies the fetched context back into @rsp.
 *
 * @rsp may be NULL for AF-internal operations (e.g. broadcast MCE
 * maintenance); that also suppresses the READ copy-out.
 */
static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
				   struct nix_aq_enq_req *req,
				   struct nix_aq_enq_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int nixlf, blkaddr, rc = 0;
	struct nix_aq_inst_s inst;
	struct rvu_block *block;
	struct admin_queue *aq;
	struct rvu_pfvf *pfvf;
	u16 smq, smq_mask;
	void *ctx, *mask;
	bool ena;
	u64 cfg;

	blkaddr = nix_hw->blkaddr;
	block = &hw->block[blkaddr];
	aq = block->aq;
	if (!aq) {
		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
		return NIX_AF_ERR_AQ_ENQUEUE;
	}

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);

	/* Skip NIXLF check for broadcast MCE entry and bandwidth profile
	 * operations done by AF itself.
	 */
	if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
	      (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
		if (!pfvf->nixlf || nixlf < 0)
			return NIX_AF_ERR_AF_LF_INVALID;
	}

	/* Per-ctype bounds/ownership validation of req->qidx */
	switch (req->ctype) {
	case NIX_AQ_CTYPE_RQ:
		/* Check if index exceeds max no of queues */
		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_SQ:
		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_CQ:
		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_RSS:
		/* Check if RSS is enabled and qidx is within range */
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_MCE:
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);

		/* Check if index exceeds MCE list length */
		if (!nix_hw->mcast.mce_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;

		/* Adding multicast lists for requests from PF/VFs is not
		 * yet supported, so ignore this.
		 */
		if (rsp)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_BANDPROF:
		if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
					nix_hw, pcifunc))
			rc = NIX_AF_ERR_INVALID_BANDPROF;
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
	}

	if (rc)
		return rc;

	nix_get_aq_req_smq(rvu, req, &smq, &smq_mask);
	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
	if (req->ctype == NIX_AQ_CTYPE_SQ &&
	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
	     (req->op == NIX_AQ_INSTOP_WRITE &&
	      req->sq_mask.ena && req->sq.ena && smq_mask))) {
		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
				     pcifunc, smq))
			return NIX_AF_ERR_AQ_ENQUEUE;
	}

	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
	inst.lf = nixlf;
	inst.cindex = req->qidx;
	inst.ctype = req->ctype;
	inst.op = req->op;
	/* Currently we are not supporting enqueuing multiple instructions,
	 * so always choose first entry in result memory.
	 */
	inst.res_addr = (u64)aq->res->iova;

	/* Hardware uses same aq->res->base for updating result of
	 * previous instruction hence wait here till it is done.
	 */
	spin_lock(&aq->lock);

	/* Clean result + context memory */
	memset(aq->res->base, 0, aq->res->entry_sz);
	/* Context needs to be written at RES_ADDR + 128 */
	ctx = aq->res->base + 128;
	/* Mask needs to be written at RES_ADDR + 256 */
	mask = aq->res->base + 256;

	/* Stage the write-mask (WRITE only) and the context image
	 * (WRITE and INIT) into the result buffer for HW to consume.
	 */
	switch (req->op) {
	case NIX_AQ_INSTOP_WRITE:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(mask, &req->rq_mask,
			       sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(mask, &req->sq_mask,
			       sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(mask, &req->cq_mask,
			       sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(mask, &req->rss_mask,
			       sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(mask, &req->mce_mask,
			       sizeof(struct nix_rx_mce_s));
		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
			memcpy(mask, &req->prof_mask,
			       sizeof(struct nix_bandprof_s));
		fallthrough;
	case NIX_AQ_INSTOP_INIT:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
			memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
		break;
	case NIX_AQ_INSTOP_NOP:
	case NIX_AQ_INSTOP_READ:
	case NIX_AQ_INSTOP_LOCK:
	case NIX_AQ_INSTOP_UNLOCK:
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Submit the instruction to AQ */
	rc = nix_aq_enqueue_wait(rvu, block, &inst);
	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
	if (req->op == NIX_AQ_INSTOP_INIT) {
		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
			__set_bit(req->qidx, pfvf->rq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
			__set_bit(req->qidx, pfvf->sq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
			__set_bit(req->qidx, pfvf->cq_bmap);
	}

	/* For WRITE, recompute the enable bit: take the new value where
	 * the mask selects it, otherwise keep the current bitmap state.
	 */
	if (req->op == NIX_AQ_INSTOP_WRITE) {
		if (req->ctype == NIX_AQ_CTYPE_RQ) {
			ena = (req->rq.ena & req->rq_mask.ena) |
				(test_bit(req->qidx, pfvf->rq_bmap) &
				~req->rq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->rq_bmap);
			else
				__clear_bit(req->qidx, pfvf->rq_bmap);
		}
		/* NOTE(review): the SQ and CQ branches below read
		 * req->rq.ena rather than req->sq.ena / req->cq.ena. The
		 * per-ctype contexts live in a union inside the request,
		 * so this is only correct if the 'ena' bit occupies the
		 * same position in each context layout — confirm against
		 * the nix_*_ctx_s definitions in rvu_struct.h.
		 */
		if (req->ctype == NIX_AQ_CTYPE_SQ) {
			ena = (req->rq.ena & req->sq_mask.ena) |
				(test_bit(req->qidx, pfvf->sq_bmap) &
				~req->sq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->sq_bmap);
			else
				__clear_bit(req->qidx, pfvf->sq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_CQ) {
			ena = (req->rq.ena & req->cq_mask.ena) |
				(test_bit(req->qidx, pfvf->cq_bmap) &
				~req->cq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->cq_bmap);
			else
				__clear_bit(req->qidx, pfvf->cq_bmap);
		}
	}

	if (rsp) {
		/* Copy read context into mailbox */
		if (req->op == NIX_AQ_INSTOP_READ) {
			if (req->ctype == NIX_AQ_CTYPE_RQ)
				memcpy(&rsp->rq, ctx,
				       sizeof(struct nix_rq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_SQ)
				memcpy(&rsp->sq, ctx,
				       sizeof(struct nix_sq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_CQ)
				memcpy(&rsp->cq, ctx,
				       sizeof(struct nix_cq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_RSS)
				memcpy(&rsp->rss, ctx,
				       sizeof(struct nix_rsse_s));
			else if (req->ctype == NIX_AQ_CTYPE_MCE)
				memcpy(&rsp->mce, ctx,
				       sizeof(struct nix_rx_mce_s));
			else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
				memcpy(&rsp->prof, ctx,
				       sizeof(struct nix_bandprof_s));
		}
	}

	spin_unlock(&aq->lock);
	return 0;
}
1101557dd485SGeetha sowjanya
/* Verify that an AQ WRITE to a CQ context actually landed in HW.
 *
 * Reads the context back, masks both the request and the read-back
 * value down to the fields the caller intended to change (minus fields
 * HW itself updates), and compares them.
 *
 * Returns 0 when the write is reflected in HW (or @req is not a CQ op),
 * NIX_AF_ERR_AQ_CTX_RETRY_WRITE when the write should be resubmitted,
 * or the error from the context read-back.
 */
static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
				 struct nix_aq_enq_req *req, u8 ctype)
{
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	int rc, word;

	/* Only CQ context writes are affected by the errata this guards */
	if (req->ctype != NIX_AQ_CTYPE_CQ)
		return 0;

	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
				 req->hdr.pcifunc, ctype, req->qidx);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
			__func__, nix_get_ctx_name(ctype), req->qidx,
			req->hdr.pcifunc);
		return rc;
	}

	/* Make copy of original context & mask which are required
	 * for resubmission
	 */
	memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
	memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));

	/* exclude fields which HW can update */
	aq_req.cq_mask.cq_err       = 0;
	aq_req.cq_mask.wrptr        = 0;
	aq_req.cq_mask.tail         = 0;
	aq_req.cq_mask.head         = 0;
	aq_req.cq_mask.avg_level    = 0;
	aq_req.cq_mask.update_time  = 0;
	aq_req.cq_mask.substream    = 0;

	/* Context mask (cq_mask) holds mask value of fields which
	 * are changed in AQ WRITE operation.
	 * for example cq.drop = 0xa;
	 *             cq_mask.drop = 0xff;
	 * Below logic performs '&' between cq and cq_mask so that non
	 * updated fields are masked out for request and response
	 * comparison
	 */
	for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
	     word++) {
		*(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
		*(u64 *)((u8 *)&aq_req.cq + word * 8) &=
			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
	}

	/* Any remaining difference means HW discarded the write */
	if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
		return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;

	return 0;
}
115814e94f94SHariprasad Kelam
rvu_nix_aq_enq_inst(struct rvu * rvu,struct nix_aq_enq_req * req,struct nix_aq_enq_rsp * rsp)115955efcc57SSubbaraya Sundeep static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
116055efcc57SSubbaraya Sundeep struct nix_aq_enq_rsp *rsp)
116155efcc57SSubbaraya Sundeep {
116255efcc57SSubbaraya Sundeep struct nix_hw *nix_hw;
116314e94f94SHariprasad Kelam int err, retries = 5;
116455efcc57SSubbaraya Sundeep int blkaddr;
116555efcc57SSubbaraya Sundeep
116655efcc57SSubbaraya Sundeep blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
116755efcc57SSubbaraya Sundeep if (blkaddr < 0)
116855efcc57SSubbaraya Sundeep return NIX_AF_ERR_AF_LF_INVALID;
116955efcc57SSubbaraya Sundeep
117055efcc57SSubbaraya Sundeep nix_hw = get_nix_hw(rvu->hw, blkaddr);
117155efcc57SSubbaraya Sundeep if (!nix_hw)
11727278c359SNaveen Mamindlapalli return NIX_AF_ERR_INVALID_NIXBLK;
117355efcc57SSubbaraya Sundeep
117414e94f94SHariprasad Kelam retry:
117514e94f94SHariprasad Kelam err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
117614e94f94SHariprasad Kelam
117714e94f94SHariprasad Kelam /* HW errata 'AQ Modification to CQ could be discarded on heavy traffic'
117814e94f94SHariprasad Kelam * As a work around perfrom CQ context read after each AQ write. If AQ
117914e94f94SHariprasad Kelam * read shows AQ write is not updated perform AQ write again.
118014e94f94SHariprasad Kelam */
118114e94f94SHariprasad Kelam if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
118214e94f94SHariprasad Kelam err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ);
118314e94f94SHariprasad Kelam if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) {
118414e94f94SHariprasad Kelam if (retries--)
118514e94f94SHariprasad Kelam goto retry;
118614e94f94SHariprasad Kelam else
118714e94f94SHariprasad Kelam return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
118814e94f94SHariprasad Kelam }
118914e94f94SHariprasad Kelam }
119014e94f94SHariprasad Kelam
119114e94f94SHariprasad Kelam return err;
119255efcc57SSubbaraya Sundeep }
119355efcc57SSubbaraya Sundeep
nix_get_ctx_name(int ctype)1194a0291766SSunil Goutham static const char *nix_get_ctx_name(int ctype)
1195a0291766SSunil Goutham {
1196a0291766SSunil Goutham switch (ctype) {
1197a0291766SSunil Goutham case NIX_AQ_CTYPE_CQ:
1198a0291766SSunil Goutham return "CQ";
1199a0291766SSunil Goutham case NIX_AQ_CTYPE_SQ:
1200a0291766SSunil Goutham return "SQ";
1201a0291766SSunil Goutham case NIX_AQ_CTYPE_RQ:
1202a0291766SSunil Goutham return "RQ";
1203a0291766SSunil Goutham case NIX_AQ_CTYPE_RSS:
1204a0291766SSunil Goutham return "RSS";
1205a0291766SSunil Goutham }
1206a0291766SSunil Goutham return "";
1207a0291766SSunil Goutham }
1208a0291766SSunil Goutham
nix_lf_hwctx_disable(struct rvu * rvu,struct hwctx_disable_req * req)1209557dd485SGeetha sowjanya static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
1210557dd485SGeetha sowjanya {
1211557dd485SGeetha sowjanya struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
1212557dd485SGeetha sowjanya struct nix_aq_enq_req aq_req;
1213557dd485SGeetha sowjanya unsigned long *bmap;
1214557dd485SGeetha sowjanya int qidx, q_cnt = 0;
1215557dd485SGeetha sowjanya int err = 0, rc;
1216557dd485SGeetha sowjanya
1217557dd485SGeetha sowjanya if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
1218557dd485SGeetha sowjanya return NIX_AF_ERR_AQ_ENQUEUE;
1219557dd485SGeetha sowjanya
1220557dd485SGeetha sowjanya memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1221557dd485SGeetha sowjanya aq_req.hdr.pcifunc = req->hdr.pcifunc;
1222557dd485SGeetha sowjanya
1223557dd485SGeetha sowjanya if (req->ctype == NIX_AQ_CTYPE_CQ) {
1224557dd485SGeetha sowjanya aq_req.cq.ena = 0;
1225557dd485SGeetha sowjanya aq_req.cq_mask.ena = 1;
122627150bc4SGeetha sowjanya aq_req.cq.bp_ena = 0;
122727150bc4SGeetha sowjanya aq_req.cq_mask.bp_ena = 1;
1228557dd485SGeetha sowjanya q_cnt = pfvf->cq_ctx->qsize;
1229557dd485SGeetha sowjanya bmap = pfvf->cq_bmap;
1230557dd485SGeetha sowjanya }
1231557dd485SGeetha sowjanya if (req->ctype == NIX_AQ_CTYPE_SQ) {
1232557dd485SGeetha sowjanya aq_req.sq.ena = 0;
1233557dd485SGeetha sowjanya aq_req.sq_mask.ena = 1;
1234557dd485SGeetha sowjanya q_cnt = pfvf->sq_ctx->qsize;
1235557dd485SGeetha sowjanya bmap = pfvf->sq_bmap;
1236557dd485SGeetha sowjanya }
1237557dd485SGeetha sowjanya if (req->ctype == NIX_AQ_CTYPE_RQ) {
1238557dd485SGeetha sowjanya aq_req.rq.ena = 0;
1239557dd485SGeetha sowjanya aq_req.rq_mask.ena = 1;
1240557dd485SGeetha sowjanya q_cnt = pfvf->rq_ctx->qsize;
1241557dd485SGeetha sowjanya bmap = pfvf->rq_bmap;
1242557dd485SGeetha sowjanya }
1243557dd485SGeetha sowjanya
1244557dd485SGeetha sowjanya aq_req.ctype = req->ctype;
1245557dd485SGeetha sowjanya aq_req.op = NIX_AQ_INSTOP_WRITE;
1246557dd485SGeetha sowjanya
1247557dd485SGeetha sowjanya for (qidx = 0; qidx < q_cnt; qidx++) {
1248557dd485SGeetha sowjanya if (!test_bit(qidx, bmap))
1249557dd485SGeetha sowjanya continue;
1250557dd485SGeetha sowjanya aq_req.qidx = qidx;
1251557dd485SGeetha sowjanya rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
1252557dd485SGeetha sowjanya if (rc) {
1253557dd485SGeetha sowjanya err = rc;
1254557dd485SGeetha sowjanya dev_err(rvu->dev, "Failed to disable %s:%d context\n",
1255a0291766SSunil Goutham nix_get_ctx_name(req->ctype), qidx);
1256557dd485SGeetha sowjanya }
1257557dd485SGeetha sowjanya }
1258557dd485SGeetha sowjanya
1259557dd485SGeetha sowjanya return err;
1260ffb0abd7SSunil Goutham }
1261ffb0abd7SSunil Goutham
1262a0291766SSunil Goutham #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
nix_lf_hwctx_lockdown(struct rvu * rvu,struct nix_aq_enq_req * req)1263a0291766SSunil Goutham static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
1264a0291766SSunil Goutham {
1265a0291766SSunil Goutham struct nix_aq_enq_req lock_ctx_req;
1266a0291766SSunil Goutham int err;
1267a0291766SSunil Goutham
1268a0291766SSunil Goutham if (req->op != NIX_AQ_INSTOP_INIT)
1269a0291766SSunil Goutham return 0;
1270a0291766SSunil Goutham
1271a0291766SSunil Goutham if (req->ctype == NIX_AQ_CTYPE_MCE ||
1272a0291766SSunil Goutham req->ctype == NIX_AQ_CTYPE_DYNO)
1273a0291766SSunil Goutham return 0;
1274a0291766SSunil Goutham
1275a0291766SSunil Goutham memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
1276a0291766SSunil Goutham lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
1277a0291766SSunil Goutham lock_ctx_req.ctype = req->ctype;
1278a0291766SSunil Goutham lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
1279a0291766SSunil Goutham lock_ctx_req.qidx = req->qidx;
1280a0291766SSunil Goutham err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
1281a0291766SSunil Goutham if (err)
1282a0291766SSunil Goutham dev_err(rvu->dev,
1283a0291766SSunil Goutham "PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
1284a0291766SSunil Goutham req->hdr.pcifunc,
1285a0291766SSunil Goutham nix_get_ctx_name(req->ctype), req->qidx);
1286a0291766SSunil Goutham return err;
1287a0291766SSunil Goutham }
1288a0291766SSunil Goutham
/* Mbox handler: enqueue a NIX AQ instruction and, when it succeeds,
 * lock the new context into NDC (dynamic caching disabled build).
 */
int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	int rc;

	rc = rvu_nix_aq_enq_inst(rvu, req, rsp);
	if (rc)
		return rc;

	return nix_lf_hwctx_lockdown(rvu, req);
}
1300a0291766SSunil Goutham #else
1301a0291766SSunil Goutham
/* Mbox handler: enqueue a NIX AQ instruction.  In this build (dynamic
 * NDC caching enabled) no context lockdown is required after INIT, so
 * this is a plain pass-through to the common enqueue path.
 */
int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
1308a0291766SSunil Goutham #endif
/* CN10K mbox handler: route the CN10K-specific AQ request through the
 * common enqueue path.  The CN10K structures are cast to the generic
 * layouts here; their leading fields are assumed layout-compatible —
 * same assumption as the original cast-through.
 */
int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
				      struct nix_cn10k_aq_enq_req *req,
				      struct nix_cn10k_aq_enq_rsp *rsp)
{
	struct nix_aq_enq_req *aq_req = (struct nix_aq_enq_req *)req;
	struct nix_aq_enq_rsp *aq_rsp = (struct nix_aq_enq_rsp *)rsp;

	return rvu_nix_aq_enq_inst(rvu, aq_req, aq_rsp);
}
1317ffb0abd7SSunil Goutham
/* Mbox handler: disable all HW queue contexts of the type named in
 * @req (CQ/SQ/RQ) belonging to the sender's NIX LF.
 */
int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp)
{
	return nix_lf_hwctx_disable(rvu, req);
}
1324557dd485SGeetha sowjanya
rvu_mbox_handler_nix_lf_alloc(struct rvu * rvu,struct nix_lf_alloc_req * req,struct nix_lf_alloc_rsp * rsp)1325eac66686SSunil Goutham int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
1326cb30711aSSunil Goutham struct nix_lf_alloc_req *req,
1327cb30711aSSunil Goutham struct nix_lf_alloc_rsp *rsp)
1328cb30711aSSunil Goutham {
13298bb991c5STomasz Duszynski int nixlf, qints, hwctx_size, intf, err, rc = 0;
1330cb30711aSSunil Goutham struct rvu_hwinfo *hw = rvu->hw;
1331cb30711aSSunil Goutham u16 pcifunc = req->hdr.pcifunc;
1332cb30711aSSunil Goutham struct rvu_block *block;
1333cb30711aSSunil Goutham struct rvu_pfvf *pfvf;
1334cb30711aSSunil Goutham u64 cfg, ctx_cfg;
1335cb30711aSSunil Goutham int blkaddr;
1336cb30711aSSunil Goutham
1337cb30711aSSunil Goutham if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
1338cb30711aSSunil Goutham return NIX_AF_ERR_PARAM;
1339cb30711aSSunil Goutham
1340ee1e7591SGeetha sowjanya if (req->way_mask)
1341ee1e7591SGeetha sowjanya req->way_mask &= 0xFFFF;
1342ee1e7591SGeetha sowjanya
1343cb30711aSSunil Goutham pfvf = rvu_get_pfvf(rvu, pcifunc);
1344cb30711aSSunil Goutham blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1345cb30711aSSunil Goutham if (!pfvf->nixlf || blkaddr < 0)
1346cb30711aSSunil Goutham return NIX_AF_ERR_AF_LF_INVALID;
1347cb30711aSSunil Goutham
1348cb30711aSSunil Goutham block = &hw->block[blkaddr];
1349cb30711aSSunil Goutham nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1350cb30711aSSunil Goutham if (nixlf < 0)
1351cb30711aSSunil Goutham return NIX_AF_ERR_AF_LF_INVALID;
1352cb30711aSSunil Goutham
1353f325d3f4SSunil Goutham /* Check if requested 'NIXLF <=> NPALF' mapping is valid */
1354f325d3f4SSunil Goutham if (req->npa_func) {
1355f325d3f4SSunil Goutham /* If default, use 'this' NIXLF's PFFUNC */
1356f325d3f4SSunil Goutham if (req->npa_func == RVU_DEFAULT_PF_FUNC)
1357f325d3f4SSunil Goutham req->npa_func = pcifunc;
1358f325d3f4SSunil Goutham if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
1359f325d3f4SSunil Goutham return NIX_AF_INVAL_NPA_PF_FUNC;
1360f325d3f4SSunil Goutham }
1361f325d3f4SSunil Goutham
1362f325d3f4SSunil Goutham /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
1363f325d3f4SSunil Goutham if (req->sso_func) {
1364f325d3f4SSunil Goutham /* If default, use 'this' NIXLF's PFFUNC */
1365f325d3f4SSunil Goutham if (req->sso_func == RVU_DEFAULT_PF_FUNC)
1366f325d3f4SSunil Goutham req->sso_func = pcifunc;
1367f325d3f4SSunil Goutham if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
1368f325d3f4SSunil Goutham return NIX_AF_INVAL_SSO_PF_FUNC;
1369f325d3f4SSunil Goutham }
1370f325d3f4SSunil Goutham
1371cb30711aSSunil Goutham /* If RSS is being enabled, check if requested config is valid.
1372cb30711aSSunil Goutham * RSS table size should be power of two, otherwise
1373cb30711aSSunil Goutham * RSS_GRP::OFFSET + adder might go beyond that group or
1374cb30711aSSunil Goutham * won't be able to use entire table.
1375cb30711aSSunil Goutham */
1376cb30711aSSunil Goutham if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
1377cb30711aSSunil Goutham !is_power_of_2(req->rss_sz)))
1378cb30711aSSunil Goutham return NIX_AF_ERR_RSS_SIZE_INVALID;
1379cb30711aSSunil Goutham
1380cb30711aSSunil Goutham if (req->rss_sz &&
1381cb30711aSSunil Goutham (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
1382cb30711aSSunil Goutham return NIX_AF_ERR_RSS_GRPS_INVALID;
1383cb30711aSSunil Goutham
1384cb30711aSSunil Goutham /* Reset this NIX LF */
1385cb30711aSSunil Goutham err = rvu_lf_reset(rvu, block, nixlf);
1386cb30711aSSunil Goutham if (err) {
1387cb30711aSSunil Goutham dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1388cb30711aSSunil Goutham block->addr - BLKADDR_NIX0, nixlf);
1389cb30711aSSunil Goutham return NIX_AF_ERR_LF_RESET;
1390cb30711aSSunil Goutham }
1391cb30711aSSunil Goutham
1392cb30711aSSunil Goutham ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
1393cb30711aSSunil Goutham
1394cb30711aSSunil Goutham /* Alloc NIX RQ HW context memory and config the base */
1395cb30711aSSunil Goutham hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
1396cb30711aSSunil Goutham err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
1397cb30711aSSunil Goutham if (err)
1398cb30711aSSunil Goutham goto free_mem;
1399cb30711aSSunil Goutham
1400557dd485SGeetha sowjanya pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
1401557dd485SGeetha sowjanya if (!pfvf->rq_bmap)
1402557dd485SGeetha sowjanya goto free_mem;
1403557dd485SGeetha sowjanya
1404cb30711aSSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
1405cb30711aSSunil Goutham (u64)pfvf->rq_ctx->iova);
1406cb30711aSSunil Goutham
1407cb30711aSSunil Goutham /* Set caching and queue count in HW */
1408ee1e7591SGeetha sowjanya cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
1409cb30711aSSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
1410cb30711aSSunil Goutham
1411cb30711aSSunil Goutham /* Alloc NIX SQ HW context memory and config the base */
1412cb30711aSSunil Goutham hwctx_size = 1UL << (ctx_cfg & 0xF);
1413cb30711aSSunil Goutham err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
1414cb30711aSSunil Goutham if (err)
1415cb30711aSSunil Goutham goto free_mem;
1416cb30711aSSunil Goutham
1417557dd485SGeetha sowjanya pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
1418557dd485SGeetha sowjanya if (!pfvf->sq_bmap)
1419557dd485SGeetha sowjanya goto free_mem;
1420557dd485SGeetha sowjanya
1421cb30711aSSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
1422cb30711aSSunil Goutham (u64)pfvf->sq_ctx->iova);
1423ee1e7591SGeetha sowjanya
1424ee1e7591SGeetha sowjanya cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
1425cb30711aSSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
1426cb30711aSSunil Goutham
1427cb30711aSSunil Goutham /* Alloc NIX CQ HW context memory and config the base */
1428cb30711aSSunil Goutham hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
1429cb30711aSSunil Goutham err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
1430cb30711aSSunil Goutham if (err)
1431cb30711aSSunil Goutham goto free_mem;
1432cb30711aSSunil Goutham
1433557dd485SGeetha sowjanya pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
1434557dd485SGeetha sowjanya if (!pfvf->cq_bmap)
1435557dd485SGeetha sowjanya goto free_mem;
1436557dd485SGeetha sowjanya
1437cb30711aSSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
1438cb30711aSSunil Goutham (u64)pfvf->cq_ctx->iova);
1439ee1e7591SGeetha sowjanya
1440ee1e7591SGeetha sowjanya cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
1441cb30711aSSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
1442cb30711aSSunil Goutham
1443cb30711aSSunil Goutham /* Initialize receive side scaling (RSS) */
1444cb30711aSSunil Goutham hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
1445ee1e7591SGeetha sowjanya err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
144672e192a1SJerin Jacob req->rss_grps, hwctx_size, req->way_mask,
144772e192a1SJerin Jacob !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER));
1448cb30711aSSunil Goutham if (err)
1449cb30711aSSunil Goutham goto free_mem;
1450cb30711aSSunil Goutham
1451cb30711aSSunil Goutham /* Alloc memory for CQINT's HW contexts */
1452cb30711aSSunil Goutham cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1453cb30711aSSunil Goutham qints = (cfg >> 24) & 0xFFF;
1454cb30711aSSunil Goutham hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
1455cb30711aSSunil Goutham err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
1456cb30711aSSunil Goutham if (err)
1457cb30711aSSunil Goutham goto free_mem;
1458cb30711aSSunil Goutham
1459cb30711aSSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
1460cb30711aSSunil Goutham (u64)pfvf->cq_ints_ctx->iova);
1461ee1e7591SGeetha sowjanya
1462ee1e7591SGeetha sowjanya rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
1463ee1e7591SGeetha sowjanya BIT_ULL(36) | req->way_mask << 20);
1464cb30711aSSunil Goutham
1465cb30711aSSunil Goutham /* Alloc memory for QINT's HW contexts */
1466cb30711aSSunil Goutham cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1467cb30711aSSunil Goutham qints = (cfg >> 12) & 0xFFF;
1468cb30711aSSunil Goutham hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
1469cb30711aSSunil Goutham err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
1470cb30711aSSunil Goutham if (err)
1471cb30711aSSunil Goutham goto free_mem;
1472cb30711aSSunil Goutham
1473cb30711aSSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
1474cb30711aSSunil Goutham (u64)pfvf->nix_qints_ctx->iova);
1475ee1e7591SGeetha sowjanya rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
1476ee1e7591SGeetha sowjanya BIT_ULL(36) | req->way_mask << 20);
1477cb30711aSSunil Goutham
1478a253933eSNithin Dabilpuram /* Setup VLANX TPID's.
1479a253933eSNithin Dabilpuram * Use VLAN1 for 802.1Q
1480a253933eSNithin Dabilpuram * and VLAN0 for 802.1AD.
1481a253933eSNithin Dabilpuram */
1482a253933eSNithin Dabilpuram cfg = (0x8100ULL << 16) | 0x88A8ULL;
1483a253933eSNithin Dabilpuram rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
1484a253933eSNithin Dabilpuram
1485cb30711aSSunil Goutham /* Enable LMTST for this NIX LF */
1486cb30711aSSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
1487cb30711aSSunil Goutham
1488f325d3f4SSunil Goutham /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
1489f325d3f4SSunil Goutham if (req->npa_func)
1490cb30711aSSunil Goutham cfg = req->npa_func;
1491f325d3f4SSunil Goutham if (req->sso_func)
1492cb30711aSSunil Goutham cfg |= (u64)req->sso_func << 16;
1493cb30711aSSunil Goutham
1494cb30711aSSunil Goutham cfg |= (u64)req->xqe_sz << 33;
1495cb30711aSSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
1496cb30711aSSunil Goutham
1497cb30711aSSunil Goutham /* Config Rx pkt length, csum checks and apad enable / disable */
1498cb30711aSSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
1499cb30711aSSunil Goutham
1500f1517f6fSStanislaw Kardach /* Configure pkind for TX parse config */
1501f1517f6fSStanislaw Kardach cfg = NPC_TX_DEF_PKIND;
1502f1517f6fSStanislaw Kardach rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
1503f1517f6fSStanislaw Kardach
15048bb991c5STomasz Duszynski intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
1505fe1939bbSRadha Mohan Chintakuntla if (is_sdp_pfvf(pcifunc))
1506fe1939bbSRadha Mohan Chintakuntla intf = NIX_INTF_TYPE_SDP;
1507fe1939bbSRadha Mohan Chintakuntla
1508aefaa8c7SHarman Kalra err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
1509aefaa8c7SHarman Kalra !!(req->flags & NIX_LF_LBK_BLK_SEL));
151094d942c5SGeetha sowjanya if (err)
151194d942c5SGeetha sowjanya goto free_mem;
151294d942c5SGeetha sowjanya
151340df309eSSunil Goutham /* Disable NPC entries as NIXLF's contexts are not initialized yet */
151440df309eSSunil Goutham rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
151540df309eSSunil Goutham
1516f0c2982aSNaveen Mamindlapalli /* Configure RX VTAG Type 7 (strip) for vf vlan */
1517f0c2982aSNaveen Mamindlapalli rvu_write64(rvu, blkaddr,
1518f0c2982aSNaveen Mamindlapalli NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
1519f0c2982aSNaveen Mamindlapalli VTAGSIZE_T4 | VTAG_STRIP);
1520f0c2982aSNaveen Mamindlapalli
1521cb30711aSSunil Goutham goto exit;
1522cb30711aSSunil Goutham
1523cb30711aSSunil Goutham free_mem:
1524cb30711aSSunil Goutham nix_ctx_free(rvu, pfvf);
1525cb30711aSSunil Goutham rc = -ENOMEM;
1526cb30711aSSunil Goutham
1527cb30711aSSunil Goutham exit:
1528cb30711aSSunil Goutham /* Set macaddr of this PF/VF */
1529cb30711aSSunil Goutham ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
1530cb30711aSSunil Goutham
1531cb30711aSSunil Goutham /* set SQB size info */
1532cb30711aSSunil Goutham cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
1533cb30711aSSunil Goutham rsp->sqb_size = (cfg >> 34) & 0xFFFF;
1534f5721f76SStanislaw Kardach rsp->rx_chan_base = pfvf->rx_chan_base;
1535f5721f76SStanislaw Kardach rsp->tx_chan_base = pfvf->tx_chan_base;
1536f5721f76SStanislaw Kardach rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
1537f5721f76SStanislaw Kardach rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
153859360e98SSunil Goutham rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
153959360e98SSunil Goutham rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
154034425e8cSKiran Kumar /* Get HW supported stat count */
154134425e8cSKiran Kumar cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
154234425e8cSKiran Kumar rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
154334425e8cSKiran Kumar rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
154434425e8cSKiran Kumar /* Get count of CQ IRQs and error IRQs supported per LF */
154534425e8cSKiran Kumar cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
154634425e8cSKiran Kumar rsp->qints = ((cfg >> 12) & 0xFFF);
154734425e8cSKiran Kumar rsp->cints = ((cfg >> 24) & 0xFFF);
1548a84cdceaSSubbaraya Sundeep rsp->cgx_links = hw->cgx_links;
1549a84cdceaSSubbaraya Sundeep rsp->lbk_links = hw->lbk_links;
1550a84cdceaSSubbaraya Sundeep rsp->sdp_links = hw->sdp_links;
1551a84cdceaSSubbaraya Sundeep
1552cb30711aSSunil Goutham return rc;
1553cb30711aSSunil Goutham }
1554cb30711aSSunil Goutham
rvu_mbox_handler_nix_lf_free(struct rvu * rvu,struct nix_lf_free_req * req,struct msg_rsp * rsp)155555307fcbSSubbaraya Sundeep int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
1556cb30711aSSunil Goutham struct msg_rsp *rsp)
1557cb30711aSSunil Goutham {
1558cb30711aSSunil Goutham struct rvu_hwinfo *hw = rvu->hw;
1559cb30711aSSunil Goutham u16 pcifunc = req->hdr.pcifunc;
1560cb30711aSSunil Goutham struct rvu_block *block;
1561cb30711aSSunil Goutham int blkaddr, nixlf, err;
1562cb30711aSSunil Goutham struct rvu_pfvf *pfvf;
1563cb30711aSSunil Goutham
1564cb30711aSSunil Goutham pfvf = rvu_get_pfvf(rvu, pcifunc);
1565cb30711aSSunil Goutham blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1566cb30711aSSunil Goutham if (!pfvf->nixlf || blkaddr < 0)
1567cb30711aSSunil Goutham return NIX_AF_ERR_AF_LF_INVALID;
1568cb30711aSSunil Goutham
1569cb30711aSSunil Goutham block = &hw->block[blkaddr];
1570cb30711aSSunil Goutham nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1571cb30711aSSunil Goutham if (nixlf < 0)
1572cb30711aSSunil Goutham return NIX_AF_ERR_AF_LF_INVALID;
1573cb30711aSSunil Goutham
157455307fcbSSubbaraya Sundeep if (req->flags & NIX_LF_DISABLE_FLOWS)
157555307fcbSSubbaraya Sundeep rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
157655307fcbSSubbaraya Sundeep else
157755307fcbSSubbaraya Sundeep rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
157855307fcbSSubbaraya Sundeep
15799a946defSVamsi Attunuru /* Free any tx vtag def entries used by this NIX LF */
15809a946defSVamsi Attunuru if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
15819a946defSVamsi Attunuru nix_free_tx_vtag_entries(rvu, pcifunc);
15829a946defSVamsi Attunuru
15834b05528eSSunil Goutham nix_interface_deinit(rvu, pcifunc, nixlf);
15844b05528eSSunil Goutham
1585cb30711aSSunil Goutham /* Reset this NIX LF */
1586cb30711aSSunil Goutham err = rvu_lf_reset(rvu, block, nixlf);
1587cb30711aSSunil Goutham if (err) {
1588cb30711aSSunil Goutham dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1589cb30711aSSunil Goutham block->addr - BLKADDR_NIX0, nixlf);
1590cb30711aSSunil Goutham return NIX_AF_ERR_LF_RESET;
1591cb30711aSSunil Goutham }
1592cb30711aSSunil Goutham
1593cb30711aSSunil Goutham nix_ctx_free(rvu, pfvf);
1594cb30711aSSunil Goutham
1595cb30711aSSunil Goutham return 0;
1596cb30711aSSunil Goutham }
1597cb30711aSSunil Goutham
rvu_mbox_handler_nix_mark_format_cfg(struct rvu * rvu,struct nix_mark_format_cfg * req,struct nix_mark_format_cfg_rsp * rsp)1598a27d7659SKrzysztof Kanas int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
1599a27d7659SKrzysztof Kanas struct nix_mark_format_cfg *req,
1600a27d7659SKrzysztof Kanas struct nix_mark_format_cfg_rsp *rsp)
1601a27d7659SKrzysztof Kanas {
1602a27d7659SKrzysztof Kanas u16 pcifunc = req->hdr.pcifunc;
1603a27d7659SKrzysztof Kanas struct nix_hw *nix_hw;
1604a27d7659SKrzysztof Kanas struct rvu_pfvf *pfvf;
1605a27d7659SKrzysztof Kanas int blkaddr, rc;
1606a27d7659SKrzysztof Kanas u32 cfg;
1607a27d7659SKrzysztof Kanas
1608a27d7659SKrzysztof Kanas pfvf = rvu_get_pfvf(rvu, pcifunc);
1609a27d7659SKrzysztof Kanas blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1610a27d7659SKrzysztof Kanas if (!pfvf->nixlf || blkaddr < 0)
1611a27d7659SKrzysztof Kanas return NIX_AF_ERR_AF_LF_INVALID;
1612a27d7659SKrzysztof Kanas
1613a27d7659SKrzysztof Kanas nix_hw = get_nix_hw(rvu->hw, blkaddr);
1614a27d7659SKrzysztof Kanas if (!nix_hw)
16157278c359SNaveen Mamindlapalli return NIX_AF_ERR_INVALID_NIXBLK;
1616a27d7659SKrzysztof Kanas
1617a27d7659SKrzysztof Kanas cfg = (((u32)req->offset & 0x7) << 16) |
1618a27d7659SKrzysztof Kanas (((u32)req->y_mask & 0xF) << 12) |
1619a27d7659SKrzysztof Kanas (((u32)req->y_val & 0xF) << 8) |
1620a27d7659SKrzysztof Kanas (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
1621a27d7659SKrzysztof Kanas
1622a27d7659SKrzysztof Kanas rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
1623a27d7659SKrzysztof Kanas if (rc < 0) {
1624a27d7659SKrzysztof Kanas dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
1625a27d7659SKrzysztof Kanas rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1626a27d7659SKrzysztof Kanas return NIX_AF_ERR_MARK_CFG_FAIL;
1627a27d7659SKrzysztof Kanas }
1628a27d7659SKrzysztof Kanas
1629a27d7659SKrzysztof Kanas rsp->mark_format_idx = rc;
1630a27d7659SKrzysztof Kanas return 0;
1631a27d7659SKrzysztof Kanas }
1632a27d7659SKrzysztof Kanas
/* Handle shaper update specially for few revisions.
 *
 * On the affected silicon, toggling the enable bit of a scheduler
 * queue's CIR/PIR rate-limit register while the queue is live can
 * misbehave, so the queue is software-XOFF'ed around the update:
 *  - disable: XOFF, clear the rate register, short delay, XON.
 *  - enable:  XOFF, poll MD_DEBUG0 until VLD (bit 32) is set or C_CON
 *    (bit 48) clears, then write the new value and XON.
 *
 * Returns true when @reg was a rate (CIR/PIR) register and the write
 * has been fully handled here; false tells the caller to perform the
 * register write itself.
 */
static bool
handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
			    int lvl, u64 reg, u64 regval)
{
	u64 regbase, oldval, sw_xoff = 0;
	u64 dbgval, md_debug0 = 0;
	unsigned long poll_tmo;
	bool rate_reg = 0;
	u32 schq;

	regbase = reg & 0xFFFF;
	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);

	/* Check for rate register; also pick up the per-level SW_XOFF and
	 * MD_DEBUG0 register addresses for this queue.
	 */
	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL1X_SW_XOFF(schq);

		/* TL1 has no PIR register */
		rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0));
		break;
	case NIX_TXSCH_LVL_TL2:
		md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL2X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL2X_CIR(0) ||
			    regbase == NIX_AF_TL2X_PIR(0));
		break;
	case NIX_TXSCH_LVL_TL3:
		md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL3X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL3X_CIR(0) ||
			    regbase == NIX_AF_TL3X_PIR(0));
		break;
	case NIX_TXSCH_LVL_TL4:
		md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL4X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL4X_CIR(0) ||
			    regbase == NIX_AF_TL4X_PIR(0));
		break;
	case NIX_TXSCH_LVL_MDQ:
		/* MDQ level: no MD_DEBUG0 poll on enable (md_debug0 = 0) */
		sw_xoff = NIX_AF_MDQX_SW_XOFF(schq);
		rate_reg = (regbase == NIX_AF_MDQX_CIR(0) ||
			    regbase == NIX_AF_MDQX_PIR(0));
		break;
	}

	if (!rate_reg)
		return false;

	/* Nothing special to do when the enable state is not toggled */
	oldval = rvu_read64(rvu, blkaddr, reg);
	if ((oldval & 0x1) == (regval & 0x1)) {
		rvu_write64(rvu, blkaddr, reg, regval);
		return true;
	}

	/* PIR/CIR disable: XOFF, clear the rate register, let the
	 * pipeline settle briefly, then XON again.
	 */
	if (!(regval & 0x1)) {
		rvu_write64(rvu, blkaddr, sw_xoff, 1);
		rvu_write64(rvu, blkaddr, reg, 0);
		udelay(4);
		rvu_write64(rvu, blkaddr, sw_xoff, 0);
		return true;
	}

	/* PIR/CIR enable */
	rvu_write64(rvu, blkaddr, sw_xoff, 1);
	if (md_debug0) {
		/* 10 ms polling budget before giving up */
		poll_tmo = jiffies + usecs_to_jiffies(10000);
		/* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
		do {
			if (time_after(jiffies, poll_tmo)) {
				dev_err(rvu->dev,
					"NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
					nixlf, schq, lvl);
				/* XON is still restored on the exit path */
				goto exit;
			}
			usleep_range(1, 5);
			dbgval = rvu_read64(rvu, blkaddr, md_debug0);
		} while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48)));
	}
	rvu_write64(rvu, blkaddr, reg, regval);
exit:
	rvu_write64(rvu, blkaddr, sw_xoff, 0);
	return true;
}
1723d0641163SNithin Dabilpuram
nix_reset_tx_schedule(struct rvu * rvu,int blkaddr,int lvl,int schq)17246b4b2dedSHariprasad Kelam static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr,
17256b4b2dedSHariprasad Kelam int lvl, int schq)
17266b4b2dedSHariprasad Kelam {
17276b4b2dedSHariprasad Kelam u64 tlx_parent = 0, tlx_schedule = 0;
17286b4b2dedSHariprasad Kelam
17296b4b2dedSHariprasad Kelam switch (lvl) {
17306b4b2dedSHariprasad Kelam case NIX_TXSCH_LVL_TL2:
17316b4b2dedSHariprasad Kelam tlx_parent = NIX_AF_TL2X_PARENT(schq);
17326b4b2dedSHariprasad Kelam tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq);
17336b4b2dedSHariprasad Kelam break;
17346b4b2dedSHariprasad Kelam case NIX_TXSCH_LVL_TL3:
17356b4b2dedSHariprasad Kelam tlx_parent = NIX_AF_TL3X_PARENT(schq);
17366b4b2dedSHariprasad Kelam tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq);
17376b4b2dedSHariprasad Kelam break;
17386b4b2dedSHariprasad Kelam case NIX_TXSCH_LVL_TL4:
17396b4b2dedSHariprasad Kelam tlx_parent = NIX_AF_TL4X_PARENT(schq);
17406b4b2dedSHariprasad Kelam tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq);
17416b4b2dedSHariprasad Kelam break;
17426b4b2dedSHariprasad Kelam case NIX_TXSCH_LVL_MDQ:
17436b4b2dedSHariprasad Kelam /* no need to reset SMQ_CFG as HW clears this CSR
17446b4b2dedSHariprasad Kelam * on SMQ flush
17456b4b2dedSHariprasad Kelam */
17466b4b2dedSHariprasad Kelam tlx_parent = NIX_AF_MDQX_PARENT(schq);
17476b4b2dedSHariprasad Kelam tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq);
17486b4b2dedSHariprasad Kelam break;
17496b4b2dedSHariprasad Kelam default:
17506b4b2dedSHariprasad Kelam return;
17516b4b2dedSHariprasad Kelam }
17526b4b2dedSHariprasad Kelam
17536b4b2dedSHariprasad Kelam if (tlx_parent)
17546b4b2dedSHariprasad Kelam rvu_write64(rvu, blkaddr, tlx_parent, 0x0);
17556b4b2dedSHariprasad Kelam
17566b4b2dedSHariprasad Kelam if (tlx_schedule)
17576b4b2dedSHariprasad Kelam rvu_write64(rvu, blkaddr, tlx_schedule, 0x0);
17586b4b2dedSHariprasad Kelam }
17596b4b2dedSHariprasad Kelam
1760a3e7121cSSunil Goutham /* Disable shaping of pkts by a scheduler queue
1761a3e7121cSSunil Goutham * at a given scheduler level.
1762a3e7121cSSunil Goutham */
nix_reset_tx_shaping(struct rvu * rvu,int blkaddr,int nixlf,int lvl,int schq)1763a3e7121cSSunil Goutham static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
1764d0641163SNithin Dabilpuram int nixlf, int lvl, int schq)
1765a3e7121cSSunil Goutham {
1766d0641163SNithin Dabilpuram struct rvu_hwinfo *hw = rvu->hw;
1767a3e7121cSSunil Goutham u64 cir_reg = 0, pir_reg = 0;
1768a3e7121cSSunil Goutham u64 cfg;
1769a3e7121cSSunil Goutham
/* Pick the CIR/PIR shaper register pair for this level; levels not
 * listed below (e.g. SMQ) leave cir_reg at 0 and fall through to the
 * early return further down.
 */
1770a3e7121cSSunil Goutham switch (lvl) {
1771a3e7121cSSunil Goutham case NIX_TXSCH_LVL_TL1:
1772a3e7121cSSunil Goutham cir_reg = NIX_AF_TL1X_CIR(schq);
1773a3e7121cSSunil Goutham pir_reg = 0; /* PIR not available at TL1 */
1774a3e7121cSSunil Goutham break;
1775a3e7121cSSunil Goutham case NIX_TXSCH_LVL_TL2:
1776a3e7121cSSunil Goutham cir_reg = NIX_AF_TL2X_CIR(schq);
1777a3e7121cSSunil Goutham pir_reg = NIX_AF_TL2X_PIR(schq);
1778a3e7121cSSunil Goutham break;
1779a3e7121cSSunil Goutham case NIX_TXSCH_LVL_TL3:
1780a3e7121cSSunil Goutham cir_reg = NIX_AF_TL3X_CIR(schq);
1781a3e7121cSSunil Goutham pir_reg = NIX_AF_TL3X_PIR(schq);
1782a3e7121cSSunil Goutham break;
1783a3e7121cSSunil Goutham case NIX_TXSCH_LVL_TL4:
1784a3e7121cSSunil Goutham cir_reg = NIX_AF_TL4X_CIR(schq);
1785a3e7121cSSunil Goutham pir_reg = NIX_AF_TL4X_PIR(schq);
1786a3e7121cSSunil Goutham break;
1787d0641163SNithin Dabilpuram case NIX_TXSCH_LVL_MDQ:
1788d0641163SNithin Dabilpuram cir_reg = NIX_AF_MDQX_CIR(schq);
1789d0641163SNithin Dabilpuram pir_reg = NIX_AF_MDQX_PIR(schq);
1790d0641163SNithin Dabilpuram break;
1791d0641163SNithin Dabilpuram }
1792d0641163SNithin Dabilpuram
1793d0641163SNithin Dabilpuram /* Shaper state toggle needs wait/poll */
/* On silicons with the toggle-wait erratum, disabling the shaper must go
 * through the SW_XOFF + poll sequence in handle_txschq_shaper_update()
 * (regval 0 => disable) rather than a plain bit clear.
 */
1794d0641163SNithin Dabilpuram if (hw->cap.nix_shaper_toggle_wait) {
1795d0641163SNithin Dabilpuram if (cir_reg)
1796d0641163SNithin Dabilpuram handle_txschq_shaper_update(rvu, blkaddr, nixlf,
1797d0641163SNithin Dabilpuram lvl, cir_reg, 0);
1798d0641163SNithin Dabilpuram if (pir_reg)
1799d0641163SNithin Dabilpuram handle_txschq_shaper_update(rvu, blkaddr, nixlf,
1800d0641163SNithin Dabilpuram lvl, pir_reg, 0);
1801d0641163SNithin Dabilpuram return;
1802a3e7121cSSunil Goutham }
1803a3e7121cSSunil Goutham
/* Otherwise simply clear bit 0 (shaper enable) of CIR/PIR */
1804a3e7121cSSunil Goutham if (!cir_reg)
1805a3e7121cSSunil Goutham return;
1806a3e7121cSSunil Goutham cfg = rvu_read64(rvu, blkaddr, cir_reg);
1807a3e7121cSSunil Goutham rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1808a3e7121cSSunil Goutham
1809a3e7121cSSunil Goutham if (!pir_reg)
1810a3e7121cSSunil Goutham return;
1811a3e7121cSSunil Goutham cfg = rvu_read64(rvu, blkaddr, pir_reg);
1812a3e7121cSSunil Goutham rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
1813a3e7121cSSunil Goutham }
1814a3e7121cSSunil Goutham
/* Reset a scheduler queue's link configuration: TL4's SDP link config,
 * and the CGX/LBK link config at whichever level (TL3 or TL2) the HW is
 * currently using for link mapping. No-op at/above the aggregation level.
 */
nix_reset_tx_linkcfg(struct rvu * rvu,int blkaddr,int lvl,int schq)1815a3e7121cSSunil Goutham static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
1816a3e7121cSSunil Goutham int lvl, int schq)
1817a3e7121cSSunil Goutham {
1818a3e7121cSSunil Goutham struct rvu_hwinfo *hw = rvu->hw;
1819d0641163SNithin Dabilpuram int link_level;
1820a3e7121cSSunil Goutham int link;
1821a3e7121cSSunil Goutham
18225d9b976dSSunil Goutham if (lvl >= hw->cap.nix_tx_aggr_lvl)
18235d9b976dSSunil Goutham return;
18245d9b976dSSunil Goutham
1825a3e7121cSSunil Goutham /* Reset TL4's SDP link config */
1826a3e7121cSSunil Goutham if (lvl == NIX_TXSCH_LVL_TL4)
1827a3e7121cSSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
1828a3e7121cSSunil Goutham
/* NIX_AF_PSE_CHANNEL_LEVEL bit 0 selects whether TL3 or TL2 carries the
 * per-link config; only reset link config at that level.
 */
1829d0641163SNithin Dabilpuram link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
1830d0641163SNithin Dabilpuram NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1831d0641163SNithin Dabilpuram if (lvl != link_level)
1832a3e7121cSSunil Goutham return;
1833a3e7121cSSunil Goutham
1834b279bbb3SSunil Goutham /* Reset TL2's CGX or LBK link config */
1835a3e7121cSSunil Goutham for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
1836a3e7121cSSunil Goutham rvu_write64(rvu, blkaddr,
1837a3e7121cSSunil Goutham NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
1838a3e7121cSSunil Goutham }
1839a3e7121cSSunil Goutham
nix_clear_tx_xoff(struct rvu * rvu,int blkaddr,int lvl,int schq)1840d0641163SNithin Dabilpuram static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
1841d0641163SNithin Dabilpuram int lvl, int schq)
1842d0641163SNithin Dabilpuram {
1843d0641163SNithin Dabilpuram struct rvu_hwinfo *hw = rvu->hw;
1844d0641163SNithin Dabilpuram u64 reg;
1845d0641163SNithin Dabilpuram
1846d0641163SNithin Dabilpuram /* Skip this if shaping is not supported */
1847d0641163SNithin Dabilpuram if (!hw->cap.nix_shaping)
1848d0641163SNithin Dabilpuram return;
1849d0641163SNithin Dabilpuram
1850d0641163SNithin Dabilpuram /* Clear level specific SW_XOFF */
1851d0641163SNithin Dabilpuram switch (lvl) {
1852d0641163SNithin Dabilpuram case NIX_TXSCH_LVL_TL1:
1853d0641163SNithin Dabilpuram reg = NIX_AF_TL1X_SW_XOFF(schq);
1854d0641163SNithin Dabilpuram break;
1855d0641163SNithin Dabilpuram case NIX_TXSCH_LVL_TL2:
1856d0641163SNithin Dabilpuram reg = NIX_AF_TL2X_SW_XOFF(schq);
1857d0641163SNithin Dabilpuram break;
1858d0641163SNithin Dabilpuram case NIX_TXSCH_LVL_TL3:
1859d0641163SNithin Dabilpuram reg = NIX_AF_TL3X_SW_XOFF(schq);
1860d0641163SNithin Dabilpuram break;
1861d0641163SNithin Dabilpuram case NIX_TXSCH_LVL_TL4:
1862d0641163SNithin Dabilpuram reg = NIX_AF_TL4X_SW_XOFF(schq);
1863d0641163SNithin Dabilpuram break;
1864d0641163SNithin Dabilpuram case NIX_TXSCH_LVL_MDQ:
1865d0641163SNithin Dabilpuram reg = NIX_AF_MDQX_SW_XOFF(schq);
1866d0641163SNithin Dabilpuram break;
1867d0641163SNithin Dabilpuram default:
1868d0641163SNithin Dabilpuram return;
1869d0641163SNithin Dabilpuram }
1870d0641163SNithin Dabilpuram
1871d0641163SNithin Dabilpuram rvu_write64(rvu, blkaddr, reg, 0x0);
1872d0641163SNithin Dabilpuram }
1873d0641163SNithin Dabilpuram
/* Map a PF/VF to its transmit link index: the LBK link for AF VFs, the
 * CGX LMAC link for CGX-mapped PFs, otherwise the SDP link.
 */
static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u8 cgx_id = 0, lmac_id = 0;
	int pf = rvu_get_pf(pcifunc);

	/* LBK links start right after the CGX links */
	if (is_afvf(pcifunc))
		return hw->cgx_links;

	/* CGX links: index = cgx * lmacs_per_cgx + lmac */
	if (is_pf_cgxmapped(rvu, pf)) {
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf],
				    &cgx_id, &lmac_id);
		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
	}

	/* SDP link comes after all CGX and LBK links */
	return hw->cgx_links + hw->lbk_links;
}
18905d9b976dSSunil Goutham
/* Compute the [*start, *end) range of fixed-mapping Tx scheduler queues
 * reserved for the transmit link this PF/VF is attached to.
 */
static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
				 int link, int *start, int *end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);
	int base = hw->cap.nix_txsch_per_cgx_lmac * link;

	if (is_afvf(pcifunc)) {
		/* LBK links: CGX-indexed region, but with the (smaller)
		 * per-LBK-LMAC queue count.
		 */
		*start = base;
		*end = base + hw->cap.nix_txsch_per_lbk_lmac;
	} else if (is_pf_cgxmapped(rvu, pf)) {
		/* CGX links */
		*start = base;
		*end = base + hw->cap.nix_txsch_per_cgx_lmac;
	} else {
		/* SDP link: placed after all CGX and LBK regions */
		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
			 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
	}
}
19095d9b976dSSunil Goutham
/* Validate a TLx queue allocation request for one scheduler level.
 * Aggregation levels allow exactly one queue. Below that, the request
 * must fit the free-queue budget (one fixed slot per PF_FUNC when the
 * silicon has fixed TXSCHQ mapping), honor per-function limits, and —
 * for contiguous requests — a contiguous free range must exist.
 * Returns 0 on success or NIX_AF_ERR_TLX_ALLOC_FAIL.
 */
nix_check_txschq_alloc_req(struct rvu * rvu,int lvl,u16 pcifunc,struct nix_hw * nix_hw,struct nix_txsch_alloc_req * req)19105d9b976dSSunil Goutham static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
19115d9b976dSSunil Goutham struct nix_hw *nix_hw,
19125d9b976dSSunil Goutham struct nix_txsch_alloc_req *req)
19135d9b976dSSunil Goutham {
19145d9b976dSSunil Goutham struct rvu_hwinfo *hw = rvu->hw;
19155d9b976dSSunil Goutham int schq, req_schq, free_cnt;
191626dda7daSNithin Dabilpuram struct nix_txsch *txsch;
19175d9b976dSSunil Goutham int link, start, end;
191826dda7daSNithin Dabilpuram
19195d9b976dSSunil Goutham txsch = &nix_hw->txsch[lvl];
19205d9b976dSSunil Goutham req_schq = req->schq_contig[lvl] + req->schq[lvl];
192126dda7daSNithin Dabilpuram
19225d9b976dSSunil Goutham if (!req_schq)
19235d9b976dSSunil Goutham return 0;
192426dda7daSNithin Dabilpuram
19255d9b976dSSunil Goutham link = nix_get_tx_link(rvu, pcifunc);
192626dda7daSNithin Dabilpuram
19275d9b976dSSunil Goutham /* For traffic aggregating scheduler level, one queue is enough */
19285d9b976dSSunil Goutham if (lvl >= hw->cap.nix_tx_aggr_lvl) {
19295d9b976dSSunil Goutham if (req_schq != 1)
19305d9b976dSSunil Goutham return NIX_AF_ERR_TLX_ALLOC_FAIL;
19315d9b976dSSunil Goutham return 0;
193226dda7daSNithin Dabilpuram }
193326dda7daSNithin Dabilpuram
19345d9b976dSSunil Goutham /* Get free SCHQ count and check if request can be accommodated */
19355d9b976dSSunil Goutham if (hw->cap.nix_fixed_txschq_mapping) {
19365d9b976dSSunil Goutham nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
/* Fixed mapping: each function owns exactly one slot within its link's
 * range, indexed by the function number bits of pcifunc.
 */
19375d9b976dSSunil Goutham schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
19385d9b976dSSunil Goutham if (end <= txsch->schq.max && schq < end &&
19395d9b976dSSunil Goutham !test_bit(schq, txsch->schq.bmap))
19405d9b976dSSunil Goutham free_cnt = 1;
19415d9b976dSSunil Goutham else
19425d9b976dSSunil Goutham free_cnt = 0;
19435d9b976dSSunil Goutham } else {
19445d9b976dSSunil Goutham free_cnt = rvu_rsrc_free_count(&txsch->schq);
194526dda7daSNithin Dabilpuram }
194626dda7daSNithin Dabilpuram
/* Also bound each request type by the per-function MAX_TXSCHQ_PER_FUNC limit */
19474e635f9dSSatha Rao if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
19484e635f9dSSatha Rao req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
19495d9b976dSSunil Goutham return NIX_AF_ERR_TLX_ALLOC_FAIL;
19505d9b976dSSunil Goutham
19515d9b976dSSunil Goutham /* If contiguous queues are needed, check for availability */
19525d9b976dSSunil Goutham if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
19535d9b976dSSunil Goutham !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
19545d9b976dSSunil Goutham return NIX_AF_ERR_TLX_ALLOC_FAIL;
195526dda7daSNithin Dabilpuram
195626dda7daSNithin Dabilpuram return 0;
195726dda7daSNithin Dabilpuram }
195826dda7daSNithin Dabilpuram
/* Commit queue allocations for one scheduler level: mark queues in the
 * level's bitmap and fill the response lists. The rsp->schq[lvl] /
 * rsp->schq_contig[lvl] counts are updated to reflect what was actually
 * allocated (which may be less than requested). Caller holds
 * rvu->rsrc_lock and has validated the request beforehand.
 */
nix_txsch_alloc(struct rvu * rvu,struct nix_txsch * txsch,struct nix_txsch_alloc_rsp * rsp,int lvl,int start,int end)19595d9b976dSSunil Goutham static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
19605d9b976dSSunil Goutham struct nix_txsch_alloc_rsp *rsp,
19615d9b976dSSunil Goutham int lvl, int start, int end)
19625d9b976dSSunil Goutham {
19635d9b976dSSunil Goutham struct rvu_hwinfo *hw = rvu->hw;
19645d9b976dSSunil Goutham u16 pcifunc = rsp->hdr.pcifunc;
19655d9b976dSSunil Goutham int idx, schq;
19665d9b976dSSunil Goutham
19675d9b976dSSunil Goutham /* For traffic aggregating levels, queue alloc is based
19685d9b976dSSunil Goutham * on transmit link to which PF_FUNC is mapped to.
19695d9b976dSSunil Goutham */
19705d9b976dSSunil Goutham if (lvl >= hw->cap.nix_tx_aggr_lvl) {
19715d9b976dSSunil Goutham /* A single TL queue is allocated */
19725d9b976dSSunil Goutham if (rsp->schq_contig[lvl]) {
19735d9b976dSSunil Goutham rsp->schq_contig[lvl] = 1;
19745d9b976dSSunil Goutham rsp->schq_contig_list[lvl][0] = start;
19755d9b976dSSunil Goutham }
19765d9b976dSSunil Goutham
19775d9b976dSSunil Goutham /* Both contig and non-contig reqs doesn't make sense here */
19785d9b976dSSunil Goutham if (rsp->schq_contig[lvl])
19795d9b976dSSunil Goutham rsp->schq[lvl] = 0;
19805d9b976dSSunil Goutham
19815d9b976dSSunil Goutham if (rsp->schq[lvl]) {
19825d9b976dSSunil Goutham rsp->schq[lvl] = 1;
19835d9b976dSSunil Goutham rsp->schq_list[lvl][0] = start;
19845d9b976dSSunil Goutham }
19855d9b976dSSunil Goutham return;
19865d9b976dSSunil Goutham }
19875d9b976dSSunil Goutham
19885d9b976dSSunil Goutham /* Adjust the queue request count if HW supports
19895d9b976dSSunil Goutham * only one queue per level configuration.
19905d9b976dSSunil Goutham */
19915d9b976dSSunil Goutham if (hw->cap.nix_fixed_txschq_mapping) {
19925d9b976dSSunil Goutham idx = pcifunc & RVU_PFVF_FUNC_MASK;
19935d9b976dSSunil Goutham schq = start + idx;
19945d9b976dSSunil Goutham if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
19955d9b976dSSunil Goutham rsp->schq_contig[lvl] = 0;
19965d9b976dSSunil Goutham rsp->schq[lvl] = 0;
19975d9b976dSSunil Goutham return;
19985d9b976dSSunil Goutham }
19995d9b976dSSunil Goutham
20005d9b976dSSunil Goutham if (rsp->schq_contig[lvl]) {
20015d9b976dSSunil Goutham rsp->schq_contig[lvl] = 1;
20025d9b976dSSunil Goutham set_bit(schq, txsch->schq.bmap);
20035d9b976dSSunil Goutham rsp->schq_contig_list[lvl][0] = schq;
20045d9b976dSSunil Goutham rsp->schq[lvl] = 0;
20055d9b976dSSunil Goutham } else if (rsp->schq[lvl]) {
20065d9b976dSSunil Goutham rsp->schq[lvl] = 1;
20075d9b976dSSunil Goutham set_bit(schq, txsch->schq.bmap);
20085d9b976dSSunil Goutham rsp->schq_list[lvl][0] = schq;
20095d9b976dSSunil Goutham }
20105d9b976dSSunil Goutham return;
20115d9b976dSSunil Goutham }
20125d9b976dSSunil Goutham
20135d9b976dSSunil Goutham /* Allocate contiguous queue indices requested first */
20145d9b976dSSunil Goutham if (rsp->schq_contig[lvl]) {
20155d9b976dSSunil Goutham schq = bitmap_find_next_zero_area(txsch->schq.bmap,
20165d9b976dSSunil Goutham txsch->schq.max, start,
20175d9b976dSSunil Goutham rsp->schq_contig[lvl], 0);
/* If no contiguous area starts below 'end', the contig request is dropped */
20185d9b976dSSunil Goutham if (schq >= end)
20195d9b976dSSunil Goutham rsp->schq_contig[lvl] = 0;
20205d9b976dSSunil Goutham for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
20215d9b976dSSunil Goutham set_bit(schq, txsch->schq.bmap);
20225d9b976dSSunil Goutham rsp->schq_contig_list[lvl][idx] = schq;
20235d9b976dSSunil Goutham schq++;
20245d9b976dSSunil Goutham }
20255d9b976dSSunil Goutham }
20265d9b976dSSunil Goutham
20275d9b976dSSunil Goutham /* Allocate non-contiguous queue indices */
20285d9b976dSSunil Goutham if (rsp->schq[lvl]) {
20295d9b976dSSunil Goutham idx = 0;
20305d9b976dSSunil Goutham for (schq = start; schq < end; schq++) {
20315d9b976dSSunil Goutham if (!test_bit(schq, txsch->schq.bmap)) {
20325d9b976dSSunil Goutham set_bit(schq, txsch->schq.bmap);
20335d9b976dSSunil Goutham rsp->schq_list[lvl][idx++] = schq;
20345d9b976dSSunil Goutham }
20355d9b976dSSunil Goutham if (idx == rsp->schq[lvl])
20365d9b976dSSunil Goutham break;
20375d9b976dSSunil Goutham }
20385d9b976dSSunil Goutham /* Update how many were allocated */
20395d9b976dSSunil Goutham rsp->schq[lvl] = idx;
20405d9b976dSSunil Goutham }
20415d9b976dSSunil Goutham }
20425d9b976dSSunil Goutham
/* Mbox handler: allocate NIX Tx scheduler queues at all levels for the
 * requesting PF/VF. The whole request is validated first and only then
 * committed, so a failure leaves no partial allocation. Each allocated
 * queue's link, shaping and scheduling config is reset to a clean state.
 * Returns 0 or a NIX_AF_ERR_* code.
 */
rvu_mbox_handler_nix_txsch_alloc(struct rvu * rvu,struct nix_txsch_alloc_req * req,struct nix_txsch_alloc_rsp * rsp)2043eac66686SSunil Goutham int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
2044a3e7121cSSunil Goutham struct nix_txsch_alloc_req *req,
2045a3e7121cSSunil Goutham struct nix_txsch_alloc_rsp *rsp)
2046a3e7121cSSunil Goutham {
20475d9b976dSSunil Goutham struct rvu_hwinfo *hw = rvu->hw;
2048a3e7121cSSunil Goutham u16 pcifunc = req->hdr.pcifunc;
20495d9b976dSSunil Goutham int link, blkaddr, rc = 0;
20505d9b976dSSunil Goutham int lvl, idx, start, end;
2051a3e7121cSSunil Goutham struct nix_txsch *txsch;
2052a3e7121cSSunil Goutham struct nix_hw *nix_hw;
205326dda7daSNithin Dabilpuram u32 *pfvf_map;
2054d0641163SNithin Dabilpuram int nixlf;
2055a3e7121cSSunil Goutham u16 schq;
2056a3e7121cSSunil Goutham
2057d0641163SNithin Dabilpuram rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2058d0641163SNithin Dabilpuram if (rc)
2059d0641163SNithin Dabilpuram return rc;
2060a3e7121cSSunil Goutham
2061a3e7121cSSunil Goutham nix_hw = get_nix_hw(rvu->hw, blkaddr);
2062a3e7121cSSunil Goutham if (!nix_hw)
20637278c359SNaveen Mamindlapalli return NIX_AF_ERR_INVALID_NIXBLK;
2064a3e7121cSSunil Goutham
20650964fc8fSStanislaw Kardach mutex_lock(&rvu->rsrc_lock);
20665d9b976dSSunil Goutham
20675d9b976dSSunil Goutham /* Check if request is valid as per HW capabilities
20685d9b976dSSunil Goutham * and can be accommodated.
20695d9b976dSSunil Goutham */
2070a3e7121cSSunil Goutham for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
20715d9b976dSSunil Goutham rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
20725d9b976dSSunil Goutham if (rc)
2073a3e7121cSSunil Goutham goto err;
2074a3e7121cSSunil Goutham }
2075a3e7121cSSunil Goutham
20765d9b976dSSunil Goutham /* Allocate requested Tx scheduler queues */
2077a3e7121cSSunil Goutham for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2078a3e7121cSSunil Goutham txsch = &nix_hw->txsch[lvl];
207926dda7daSNithin Dabilpuram pfvf_map = txsch->pfvf_map;
2080a3e7121cSSunil Goutham
208126dda7daSNithin Dabilpuram if (!req->schq[lvl] && !req->schq_contig[lvl])
208226dda7daSNithin Dabilpuram continue;
208326dda7daSNithin Dabilpuram
20845d9b976dSSunil Goutham rsp->schq[lvl] = req->schq[lvl];
20855d9b976dSSunil Goutham rsp->schq_contig[lvl] = req->schq_contig[lvl];
208626dda7daSNithin Dabilpuram
20875d9b976dSSunil Goutham link = nix_get_tx_link(rvu, pcifunc);
20885d9b976dSSunil Goutham
/* Pick the candidate queue range for this level: the link index itself
 * at aggregation levels, the fixed per-link range when mapping is fixed,
 * otherwise the whole level.
 */
20895d9b976dSSunil Goutham if (lvl >= hw->cap.nix_tx_aggr_lvl) {
20905d9b976dSSunil Goutham start = link;
20915d9b976dSSunil Goutham end = link;
20925d9b976dSSunil Goutham } else if (hw->cap.nix_fixed_txschq_mapping) {
20935d9b976dSSunil Goutham nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
20945d9b976dSSunil Goutham } else {
20955d9b976dSSunil Goutham start = 0;
20965d9b976dSSunil Goutham end = txsch->schq.max;
209726dda7daSNithin Dabilpuram }
209826dda7daSNithin Dabilpuram
20995d9b976dSSunil Goutham nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
2100a3e7121cSSunil Goutham
/* Reset queue config; the owner map is only (re)written for queues not
 * already marked configured (NIX_TXSCHQ_CFG_DONE).
 */
21015d9b976dSSunil Goutham /* Reset queue config */
2102a3e7121cSSunil Goutham for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
21035d9b976dSSunil Goutham schq = rsp->schq_contig_list[lvl][idx];
21045d9b976dSSunil Goutham if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
21055d9b976dSSunil Goutham NIX_TXSCHQ_CFG_DONE))
210626dda7daSNithin Dabilpuram pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
2107a3e7121cSSunil Goutham nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2108d0641163SNithin Dabilpuram nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
21096b4b2dedSHariprasad Kelam nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
21105d9b976dSSunil Goutham }
21115d9b976dSSunil Goutham
21125d9b976dSSunil Goutham for (idx = 0; idx < req->schq[lvl]; idx++) {
21135d9b976dSSunil Goutham schq = rsp->schq_list[lvl][idx];
21145d9b976dSSunil Goutham if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
21155d9b976dSSunil Goutham NIX_TXSCHQ_CFG_DONE))
21165d9b976dSSunil Goutham pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
21175d9b976dSSunil Goutham nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2118d0641163SNithin Dabilpuram nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
21196b4b2dedSHariprasad Kelam nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2120a3e7121cSSunil Goutham }
2121a3e7121cSSunil Goutham }
2122a3e7121cSSunil Goutham
/* Tell the requester which level aggregates traffic and which level
 * carries the per-link config (per NIX_AF_PSE_CHANNEL_LEVEL bit 0).
 */
21235d9b976dSSunil Goutham rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
21245d9b976dSSunil Goutham rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
21255d9b976dSSunil Goutham rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
21265d9b976dSSunil Goutham NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
21275d9b976dSSunil Goutham NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
2128a3e7121cSSunil Goutham goto exit;
2129a3e7121cSSunil Goutham err:
2130a3e7121cSSunil Goutham rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
2131a3e7121cSSunil Goutham exit:
21320964fc8fSStanislaw Kardach mutex_unlock(&rvu->rsrc_lock);
2133a3e7121cSSunil Goutham return rc;
2134a3e7121cSSunil Goutham }
2135a3e7121cSSunil Goutham
/* Walk the scheduler tree from the given SMQ up to TL1 by following the
 * per-level PARENT registers, recording each level's schq and its CIR/PIR
 * shaper register offsets/values. The snapshot lets the flush path zero
 * the shapers and restore them afterwards.
 */
nix_smq_flush_fill_ctx(struct rvu * rvu,int blkaddr,int smq,struct nix_smq_flush_ctx * smq_flush_ctx)2136e18aab04SNaveen Mamindlapalli static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
2137e18aab04SNaveen Mamindlapalli struct nix_smq_flush_ctx *smq_flush_ctx)
2138e18aab04SNaveen Mamindlapalli {
2139e18aab04SNaveen Mamindlapalli struct nix_smq_tree_ctx *smq_tree_ctx;
2140e18aab04SNaveen Mamindlapalli u64 parent_off, regval;
2141e18aab04SNaveen Mamindlapalli u16 schq;
2142e18aab04SNaveen Mamindlapalli int lvl;
2143e18aab04SNaveen Mamindlapalli
2144e18aab04SNaveen Mamindlapalli smq_flush_ctx->smq = smq;
2145e18aab04SNaveen Mamindlapalli
2146e18aab04SNaveen Mamindlapalli schq = smq;
2147e18aab04SNaveen Mamindlapalli for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
2148e18aab04SNaveen Mamindlapalli smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
21495bfbf2c1SNaveen Mamindlapalli smq_tree_ctx->schq = schq;
2150e18aab04SNaveen Mamindlapalli if (lvl == NIX_TXSCH_LVL_TL1) {
/* TL1 is the root: no PIR shaper and no parent */
2151e18aab04SNaveen Mamindlapalli smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
2152e18aab04SNaveen Mamindlapalli smq_tree_ctx->pir_off = 0;
2153e18aab04SNaveen Mamindlapalli smq_tree_ctx->pir_val = 0;
2154e18aab04SNaveen Mamindlapalli parent_off = 0;
2155e18aab04SNaveen Mamindlapalli } else if (lvl == NIX_TXSCH_LVL_TL2) {
2156e18aab04SNaveen Mamindlapalli smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
2157e18aab04SNaveen Mamindlapalli smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
2158e18aab04SNaveen Mamindlapalli parent_off = NIX_AF_TL2X_PARENT(schq);
2159e18aab04SNaveen Mamindlapalli } else if (lvl == NIX_TXSCH_LVL_TL3) {
2160e18aab04SNaveen Mamindlapalli smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq);
2161e18aab04SNaveen Mamindlapalli smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq);
2162e18aab04SNaveen Mamindlapalli parent_off = NIX_AF_TL3X_PARENT(schq);
2163e18aab04SNaveen Mamindlapalli } else if (lvl == NIX_TXSCH_LVL_TL4) {
2164e18aab04SNaveen Mamindlapalli smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq);
2165e18aab04SNaveen Mamindlapalli smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq);
2166e18aab04SNaveen Mamindlapalli parent_off = NIX_AF_TL4X_PARENT(schq);
2167e18aab04SNaveen Mamindlapalli } else if (lvl == NIX_TXSCH_LVL_MDQ) {
2168e18aab04SNaveen Mamindlapalli smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq);
2169e18aab04SNaveen Mamindlapalli smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq);
2170e18aab04SNaveen Mamindlapalli parent_off = NIX_AF_MDQX_PARENT(schq);
2171e18aab04SNaveen Mamindlapalli }
2172e18aab04SNaveen Mamindlapalli /* save cir/pir register values */
2173e18aab04SNaveen Mamindlapalli smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off);
2174e18aab04SNaveen Mamindlapalli if (smq_tree_ctx->pir_off)
2175e18aab04SNaveen Mamindlapalli smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off);
2176e18aab04SNaveen Mamindlapalli
2177e18aab04SNaveen Mamindlapalli /* get parent txsch node */
2178e18aab04SNaveen Mamindlapalli if (parent_off) {
2179e18aab04SNaveen Mamindlapalli regval = rvu_read64(rvu, blkaddr, parent_off);
/* parent schq lives in bits [24:16] of the PARENT register */
2180e18aab04SNaveen Mamindlapalli schq = (regval >> 16) & 0x1FF;
2181e18aab04SNaveen Mamindlapalli }
2182e18aab04SNaveen Mamindlapalli }
2183e18aab04SNaveen Mamindlapalli }
2184e18aab04SNaveen Mamindlapalli
/* Set (enable=true) or clear SW_XOFF on every other in-use TL2 queue that
 * belongs to the same PF as the SMQ being flushed, so that sibling
 * traffic is held off for the duration of the flush.
 */
nix_smq_flush_enadis_xoff(struct rvu * rvu,int blkaddr,struct nix_smq_flush_ctx * smq_flush_ctx,bool enable)2185e18aab04SNaveen Mamindlapalli static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
2186e18aab04SNaveen Mamindlapalli struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
2187e18aab04SNaveen Mamindlapalli {
2188e18aab04SNaveen Mamindlapalli struct nix_txsch *txsch;
2189e18aab04SNaveen Mamindlapalli struct nix_hw *nix_hw;
21905bfbf2c1SNaveen Mamindlapalli int tl2, tl2_schq;
2191e18aab04SNaveen Mamindlapalli u64 regoff;
2192e18aab04SNaveen Mamindlapalli
2193e18aab04SNaveen Mamindlapalli nix_hw = get_nix_hw(rvu->hw, blkaddr);
2194e18aab04SNaveen Mamindlapalli if (!nix_hw)
2195e18aab04SNaveen Mamindlapalli return;
2196e18aab04SNaveen Mamindlapalli
2197e18aab04SNaveen Mamindlapalli /* loop through all TL2s with matching PF_FUNC */
2198e18aab04SNaveen Mamindlapalli txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
21995bfbf2c1SNaveen Mamindlapalli tl2_schq = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL2].schq;
2200e18aab04SNaveen Mamindlapalli for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
2201e18aab04SNaveen Mamindlapalli /* skip the smq(flush) TL2 */
22025bfbf2c1SNaveen Mamindlapalli if (tl2 == tl2_schq)
2203e18aab04SNaveen Mamindlapalli continue;
2204e18aab04SNaveen Mamindlapalli /* skip unused TL2s */
2205e18aab04SNaveen Mamindlapalli if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
2206e18aab04SNaveen Mamindlapalli continue;
2207e18aab04SNaveen Mamindlapalli /* skip if PF_FUNC doesn't match */
/* Masking out RVU_PFVF_FUNC_MASK compares only the PF part of the owner.
 * NOTE(review): the mask is applied outside TXSCH_MAP_FUNC() on one side
 * and inside it on the other; both reduce to the same PF bits if
 * TXSCH_MAP_FUNC() is a plain 16-bit extract, but the asymmetry is worth
 * confirming against the macro definition.
 */
2208e18aab04SNaveen Mamindlapalli if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
22095bfbf2c1SNaveen Mamindlapalli (TXSCH_MAP_FUNC(txsch->pfvf_map[tl2_schq] &
2210e18aab04SNaveen Mamindlapalli ~RVU_PFVF_FUNC_MASK)))
2211e18aab04SNaveen Mamindlapalli continue;
2212e18aab04SNaveen Mamindlapalli /* enable/disable XOFF */
2213e18aab04SNaveen Mamindlapalli regoff = NIX_AF_TL2X_SW_XOFF(tl2);
2214e18aab04SNaveen Mamindlapalli if (enable)
2215e18aab04SNaveen Mamindlapalli rvu_write64(rvu, blkaddr, regoff, 0x1);
2216e18aab04SNaveen Mamindlapalli else
2217e18aab04SNaveen Mamindlapalli rvu_write64(rvu, blkaddr, regoff, 0x0);
2218e18aab04SNaveen Mamindlapalli }
2219e18aab04SNaveen Mamindlapalli }
2220e18aab04SNaveen Mamindlapalli
/* Restore (enable=true) or zero out (enable=false) the CIR/PIR shaper
 * registers previously snapshotted along the SMQ's scheduler tree by
 * nix_smq_flush_fill_ctx(). TL1 has no PIR shaper.
 */
static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr,
				      struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
{
	struct nix_smq_tree_ctx *ctx;
	int lvl;

	for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
		ctx = &smq_flush_ctx->smq_tree_ctx[lvl];

		rvu_write64(rvu, blkaddr, ctx->cir_off,
			    enable ? ctx->cir_val : 0x0);
		if (lvl != NIX_TXSCH_LVL_TL1)
			rvu_write64(rvu, blkaddr, ctx->pir_off,
				    enable ? ctx->pir_val : 0x0);
	}
}
2246e18aab04SNaveen Mamindlapalli
nix_smq_flush(struct rvu * rvu,int blkaddr,int smq,u16 pcifunc,int nixlf)2247d0641163SNithin Dabilpuram static int nix_smq_flush(struct rvu *rvu, int blkaddr,
22485d9b976dSSunil Goutham int smq, u16 pcifunc, int nixlf)
22495d9b976dSSunil Goutham {
2250e18aab04SNaveen Mamindlapalli struct nix_smq_flush_ctx *smq_flush_ctx;
22515bfbf2c1SNaveen Mamindlapalli int err, restore_tx_en = 0, i;
22525d9b976dSSunil Goutham int pf = rvu_get_pf(pcifunc);
22535d9b976dSSunil Goutham u8 cgx_id = 0, lmac_id = 0;
22545bfbf2c1SNaveen Mamindlapalli u16 tl2_tl3_link_schq;
22555bfbf2c1SNaveen Mamindlapalli u8 link, link_level;
22565bfbf2c1SNaveen Mamindlapalli u64 cfg, bmap = 0;
22575d9b976dSSunil Goutham
2258933a01adSGeetha sowjanya if (!is_rvu_otx2(rvu)) {
2259933a01adSGeetha sowjanya /* Skip SMQ flush if pkt count is zero */
2260933a01adSGeetha sowjanya cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq));
2261933a01adSGeetha sowjanya if (!cfg)
2262933a01adSGeetha sowjanya return 0;
2263933a01adSGeetha sowjanya }
2264933a01adSGeetha sowjanya
22655d9b976dSSunil Goutham /* enable cgx tx if disabled */
22665d9b976dSSunil Goutham if (is_pf_cgxmapped(rvu, pf)) {
22675d9b976dSSunil Goutham rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
2268fae80edeSGeetha sowjanya restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
22695d9b976dSSunil Goutham lmac_id, true);
22705d9b976dSSunil Goutham }
22715d9b976dSSunil Goutham
2272e18aab04SNaveen Mamindlapalli /* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */
2273e18aab04SNaveen Mamindlapalli smq_flush_ctx = kzalloc(sizeof(*smq_flush_ctx), GFP_KERNEL);
2274e18aab04SNaveen Mamindlapalli if (!smq_flush_ctx)
2275e18aab04SNaveen Mamindlapalli return -ENOMEM;
2276e18aab04SNaveen Mamindlapalli nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx);
2277e18aab04SNaveen Mamindlapalli nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
2278e18aab04SNaveen Mamindlapalli nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
2279e18aab04SNaveen Mamindlapalli
22805d9b976dSSunil Goutham /* Disable backpressure from physical link,
22815d9b976dSSunil Goutham * otherwise SMQ flush may stall.
22825d9b976dSSunil Goutham */
22835d9b976dSSunil Goutham rvu_cgx_enadis_rx_bp(rvu, pf, false);
22845d9b976dSSunil Goutham
22855bfbf2c1SNaveen Mamindlapalli link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
22865bfbf2c1SNaveen Mamindlapalli NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
22875bfbf2c1SNaveen Mamindlapalli tl2_tl3_link_schq = smq_flush_ctx->smq_tree_ctx[link_level].schq;
22885bfbf2c1SNaveen Mamindlapalli link = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL1].schq;
22895bfbf2c1SNaveen Mamindlapalli
22905bfbf2c1SNaveen Mamindlapalli /* SMQ set enqueue xoff */
22915bfbf2c1SNaveen Mamindlapalli cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
22925bfbf2c1SNaveen Mamindlapalli cfg |= BIT_ULL(50);
22935bfbf2c1SNaveen Mamindlapalli rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
22945bfbf2c1SNaveen Mamindlapalli
22955bfbf2c1SNaveen Mamindlapalli /* Clear all NIX_AF_TL3_TL2_LINK_CFG[ENA] for the TL3/TL2 queue */
22965bfbf2c1SNaveen Mamindlapalli for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
22975bfbf2c1SNaveen Mamindlapalli cfg = rvu_read64(rvu, blkaddr,
22985bfbf2c1SNaveen Mamindlapalli NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
22995bfbf2c1SNaveen Mamindlapalli if (!(cfg & BIT_ULL(12)))
23005bfbf2c1SNaveen Mamindlapalli continue;
2301*a4bc0307SColin Ian King bmap |= BIT_ULL(i);
23025bfbf2c1SNaveen Mamindlapalli cfg &= ~BIT_ULL(12);
23035bfbf2c1SNaveen Mamindlapalli rvu_write64(rvu, blkaddr,
23045bfbf2c1SNaveen Mamindlapalli NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
23055bfbf2c1SNaveen Mamindlapalli }
23065bfbf2c1SNaveen Mamindlapalli
23075bfbf2c1SNaveen Mamindlapalli /* Do SMQ flush and set enqueue xoff */
23085bfbf2c1SNaveen Mamindlapalli cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
23095bfbf2c1SNaveen Mamindlapalli cfg |= BIT_ULL(50) | BIT_ULL(49);
23105bfbf2c1SNaveen Mamindlapalli rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
23115bfbf2c1SNaveen Mamindlapalli
23125d9b976dSSunil Goutham /* Wait for flush to complete */
23135d9b976dSSunil Goutham err = rvu_poll_reg(rvu, blkaddr,
23145d9b976dSSunil Goutham NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
23155d9b976dSSunil Goutham if (err)
2316e18aab04SNaveen Mamindlapalli dev_info(rvu->dev,
2317e18aab04SNaveen Mamindlapalli "NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
2318e18aab04SNaveen Mamindlapalli nixlf, smq);
2319e18aab04SNaveen Mamindlapalli
23205bfbf2c1SNaveen Mamindlapalli /* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */
23215bfbf2c1SNaveen Mamindlapalli for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
2322*a4bc0307SColin Ian King if (!(bmap & BIT_ULL(i)))
23235bfbf2c1SNaveen Mamindlapalli continue;
23245bfbf2c1SNaveen Mamindlapalli cfg = rvu_read64(rvu, blkaddr,
23255bfbf2c1SNaveen Mamindlapalli NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
23265bfbf2c1SNaveen Mamindlapalli cfg |= BIT_ULL(12);
23275bfbf2c1SNaveen Mamindlapalli rvu_write64(rvu, blkaddr,
23285bfbf2c1SNaveen Mamindlapalli NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
23295bfbf2c1SNaveen Mamindlapalli }
23305bfbf2c1SNaveen Mamindlapalli
2331e18aab04SNaveen Mamindlapalli /* clear XOFF on TL2s */
2332e18aab04SNaveen Mamindlapalli nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
2333e18aab04SNaveen Mamindlapalli nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
2334e18aab04SNaveen Mamindlapalli kfree(smq_flush_ctx);
23355d9b976dSSunil Goutham
23365d9b976dSSunil Goutham rvu_cgx_enadis_rx_bp(rvu, pf, true);
23375d9b976dSSunil Goutham /* restore cgx tx state */
23385d9b976dSSunil Goutham if (restore_tx_en)
2339fae80edeSGeetha sowjanya rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
2340d0641163SNithin Dabilpuram return err;
23415d9b976dSSunil Goutham }
23425d9b976dSSunil Goutham
/* Free all transmit scheduler queues owned by 'pcifunc' and flush their
 * SMQs so no stale packets remain in the transmit path.
 *
 * Sequence (under rsrc_lock): disable TL2/TL3 queue->link mapping and
 * clear SW_XOFF on every owned queue below the aggregation level, clear
 * XOFF on the TL1 transmit link, flush every owned SMQ, then return the
 * schedulers to the free pool.  Finally NDC-TX cached state for this LF
 * is synced to LLC/DRAM.
 *
 * Returns 0 on success or a NIX_AF_* error code.
 */
static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
{
	int blkaddr, nixlf, lvl, schq, err;
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u16 map_func;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Disable TL2/3 queue links and all XOFF's before SMQ flush*/
	mutex_lock(&rvu->rsrc_lock);
	for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];

		/* Levels at/above the aggregation level are shared across
		 * the PF and its VFs; they are handled separately below.
		 */
		if (lvl >= hw->cap.nix_tx_aggr_lvl)
			continue;

		for (schq = 0; schq < txsch->schq.max; schq++) {
			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
				continue;
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
			nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
		}
	}
	nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
			  nix_get_tx_link(rvu, pcifunc));

	/* On PF cleanup, clear cfg done flag as
	 * PF would have changed default config.
	 */
	if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
		schq = nix_get_tx_link(rvu, pcifunc);
		/* Do not clear pcifunc in txsch->pfvf_map[schq] because
		 * VF might be using this TL1 queue
		 */
		map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
		txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0);
	}

	/* Flush SMQs */
	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
	for (schq = 0; schq < txsch->schq.max; schq++) {
		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
			continue;
		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
	}

	/* Now free scheduler queues to free pool */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		/* TLs above aggregation level are shared across all PF
		 * and it's VFs, hence skip freeing them.
		 */
		if (lvl >= hw->cap.nix_tx_aggr_lvl)
			continue;

		txsch = &nix_hw->txsch[lvl];
		for (schq = 0; schq < txsch->schq.max; schq++) {
			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
				continue;
			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
			rvu_free_rsrc(&txsch->schq, schq);
			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
		}
	}
	mutex_unlock(&rvu->rsrc_lock);

	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
	if (err)
		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);

	return 0;
}
2430a3e7121cSSunil Goutham
/* Free a single transmit scheduler queue named by the mailbox request
 * (req->schq_lvl / req->schq).
 *
 * Unlike nix_txschq_free(), only this one queue is released; disabling
 * the TL2/TL3 queue->link mapping before an SMQ flush is the caller's
 * responsibility, and only this queue's SW_XOFF is cleared here.
 *
 * Returns 0 on success (including the no-op case of a shared queue at
 * or above the aggregation level) or a NIX_AF_* error code.
 */
static int nix_txschq_free_one(struct rvu *rvu,
			       struct nix_txsch_free_req *req)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int lvl, schq, nixlf, blkaddr;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u32 *pfvf_map;
	int rc;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	lvl = req->schq_lvl;
	schq = req->schq;
	txsch = &nix_hw->txsch[lvl];

	/* Queues at/above the aggregation level are shared across the PF
	 * and its VFs and are never freed individually.
	 */
	if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
		return 0;

	pfvf_map = txsch->pfvf_map;
	mutex_lock(&rvu->rsrc_lock);

	/* Only the owner may free the queue */
	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
		rc = NIX_AF_ERR_TLX_INVALID;
		goto err;
	}

	/* Clear SW_XOFF of this resource only.
	 * For SMQ level, all path XOFF's
	 * need to be made clear by user
	 */
	nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);

	nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
	nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);

	/* Flush if it is a SMQ. Onus of disabling
	 * TL2/3 queue links before SMQ flush is on user
	 */
	if (lvl == NIX_TXSCH_LVL_SMQ &&
	    nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) {
		rc = NIX_AF_SMQ_FLUSH_FAILED;
		goto err;
	}

	nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);

	/* Free the resource */
	rvu_free_rsrc(&txsch->schq, schq);
	txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
err:
	mutex_unlock(&rvu->rsrc_lock);
	return rc;
}
2498e2703c5fSNithin Dabilpuram
rvu_mbox_handler_nix_txsch_free(struct rvu * rvu,struct nix_txsch_free_req * req,struct msg_rsp * rsp)2499eac66686SSunil Goutham int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
2500a3e7121cSSunil Goutham struct nix_txsch_free_req *req,
2501a3e7121cSSunil Goutham struct msg_rsp *rsp)
2502a3e7121cSSunil Goutham {
2503e2703c5fSNithin Dabilpuram if (req->flags & TXSCHQ_FREE_ALL)
2504a3e7121cSSunil Goutham return nix_txschq_free(rvu, req->hdr.pcifunc);
2505e2703c5fSNithin Dabilpuram else
2506e2703c5fSNithin Dabilpuram return nix_txschq_free_one(rvu, req);
2507a3e7121cSSunil Goutham }
2508a3e7121cSSunil Goutham
is_txschq_hierarchy_valid(struct rvu * rvu,u16 pcifunc,int blkaddr,int lvl,u64 reg,u64 regval)25095d9b976dSSunil Goutham static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
2510b279bbb3SSunil Goutham int lvl, u64 reg, u64 regval)
2511b279bbb3SSunil Goutham {
2512b279bbb3SSunil Goutham u64 regbase = reg & 0xFFFF;
2513b279bbb3SSunil Goutham u16 schq, parent;
2514b279bbb3SSunil Goutham
2515b279bbb3SSunil Goutham if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
2516b279bbb3SSunil Goutham return false;
2517b279bbb3SSunil Goutham
2518b279bbb3SSunil Goutham schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2519b279bbb3SSunil Goutham /* Check if this schq belongs to this PF/VF or not */
2520b279bbb3SSunil Goutham if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
2521b279bbb3SSunil Goutham return false;
2522b279bbb3SSunil Goutham
2523b279bbb3SSunil Goutham parent = (regval >> 16) & 0x1FF;
2524b279bbb3SSunil Goutham /* Validate MDQ's TL4 parent */
2525b279bbb3SSunil Goutham if (regbase == NIX_AF_MDQX_PARENT(0) &&
2526b279bbb3SSunil Goutham !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
2527b279bbb3SSunil Goutham return false;
2528b279bbb3SSunil Goutham
2529b279bbb3SSunil Goutham /* Validate TL4's TL3 parent */
2530b279bbb3SSunil Goutham if (regbase == NIX_AF_TL4X_PARENT(0) &&
2531b279bbb3SSunil Goutham !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
2532b279bbb3SSunil Goutham return false;
2533b279bbb3SSunil Goutham
2534b279bbb3SSunil Goutham /* Validate TL3's TL2 parent */
2535b279bbb3SSunil Goutham if (regbase == NIX_AF_TL3X_PARENT(0) &&
2536b279bbb3SSunil Goutham !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
2537b279bbb3SSunil Goutham return false;
2538b279bbb3SSunil Goutham
2539b279bbb3SSunil Goutham /* Validate TL2's TL1 parent */
2540b279bbb3SSunil Goutham if (regbase == NIX_AF_TL2X_PARENT(0) &&
2541b279bbb3SSunil Goutham !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
2542b279bbb3SSunil Goutham return false;
2543b279bbb3SSunil Goutham
2544b279bbb3SSunil Goutham return true;
2545b279bbb3SSunil Goutham }
2546b279bbb3SSunil Goutham
is_txschq_shaping_valid(struct rvu_hwinfo * hw,int lvl,u64 reg)25475d9b976dSSunil Goutham static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
254826dda7daSNithin Dabilpuram {
25495d9b976dSSunil Goutham u64 regbase;
255026dda7daSNithin Dabilpuram
25515d9b976dSSunil Goutham if (hw->cap.nix_shaping)
25525d9b976dSSunil Goutham return true;
255326dda7daSNithin Dabilpuram
25545d9b976dSSunil Goutham /* If shaping and coloring is not supported, then
25555d9b976dSSunil Goutham * *_CIR and *_PIR registers should not be configured.
25565d9b976dSSunil Goutham */
25575d9b976dSSunil Goutham regbase = reg & 0xFFFF;
255826dda7daSNithin Dabilpuram
25595d9b976dSSunil Goutham switch (lvl) {
25605d9b976dSSunil Goutham case NIX_TXSCH_LVL_TL1:
25615d9b976dSSunil Goutham if (regbase == NIX_AF_TL1X_CIR(0))
25625d9b976dSSunil Goutham return false;
25635d9b976dSSunil Goutham break;
25645d9b976dSSunil Goutham case NIX_TXSCH_LVL_TL2:
25655d9b976dSSunil Goutham if (regbase == NIX_AF_TL2X_CIR(0) ||
25665d9b976dSSunil Goutham regbase == NIX_AF_TL2X_PIR(0))
25675d9b976dSSunil Goutham return false;
25685d9b976dSSunil Goutham break;
25695d9b976dSSunil Goutham case NIX_TXSCH_LVL_TL3:
25705d9b976dSSunil Goutham if (regbase == NIX_AF_TL3X_CIR(0) ||
25715d9b976dSSunil Goutham regbase == NIX_AF_TL3X_PIR(0))
25725d9b976dSSunil Goutham return false;
25735d9b976dSSunil Goutham break;
25745d9b976dSSunil Goutham case NIX_TXSCH_LVL_TL4:
25755d9b976dSSunil Goutham if (regbase == NIX_AF_TL4X_CIR(0) ||
25765d9b976dSSunil Goutham regbase == NIX_AF_TL4X_PIR(0))
25775d9b976dSSunil Goutham return false;
25785d9b976dSSunil Goutham break;
2579d0641163SNithin Dabilpuram case NIX_TXSCH_LVL_MDQ:
2580d0641163SNithin Dabilpuram if (regbase == NIX_AF_MDQX_CIR(0) ||
2581d0641163SNithin Dabilpuram regbase == NIX_AF_MDQX_PIR(0))
2582d0641163SNithin Dabilpuram return false;
2583d0641163SNithin Dabilpuram break;
258426dda7daSNithin Dabilpuram }
25855d9b976dSSunil Goutham return true;
25865d9b976dSSunil Goutham }
25875d9b976dSSunil Goutham
/* Program default TL1 config (topology priority, RR schedule weight,
 * CIR disabled) on the TX link used by 'pcifunc', unless the PF has
 * already configured it (NIX_TXSCHQ_CFG_DONE flag set in pfvf_map).
 * Marks the queue as configured afterwards.
 *
 * Caller must hold rvu->rsrc_lock (pfvf_map is read-modify-written).
 */
static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
				u16 pcifunc, int blkaddr)
{
	u32 *pfvf_map;
	int schq;

	schq = nix_get_tx_link(rvu, pcifunc);
	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
	/* Skip if PF has already done the config */
	if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
		return;
	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
		    (TXSCH_TL1_DFLT_RR_PRIO << 1));

	/* On OcteonTx2 the config was in bytes and newer silcons
	 * it's changed to weight.
	 */
	if (!rvu->hw->cap.nix_common_dwrr_mtu)
		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
			    TXSCH_TL1_DFLT_RR_QTM);
	else
		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
			    CN10K_MAX_DWRR_WEIGHT);

	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
	pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
}
261526dda7daSNithin Dabilpuram
/* Mailbox 'reg' field encoding, masked by NIX_TX_SCHQ_MASK:
 * Register offset - [15:0]
 * Scheduler Queue number - [25:16]
 */
#define NIX_TX_SCHQ_MASK	GENMASK_ULL(25, 0)
2620d0641163SNithin Dabilpuram
nix_txschq_cfg_read(struct rvu * rvu,struct nix_hw * nix_hw,int blkaddr,struct nix_txschq_config * req,struct nix_txschq_config * rsp)2621d0641163SNithin Dabilpuram static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
2622d0641163SNithin Dabilpuram int blkaddr, struct nix_txschq_config *req,
2623d0641163SNithin Dabilpuram struct nix_txschq_config *rsp)
2624d0641163SNithin Dabilpuram {
2625d0641163SNithin Dabilpuram u16 pcifunc = req->hdr.pcifunc;
2626d0641163SNithin Dabilpuram int idx, schq;
2627d0641163SNithin Dabilpuram u64 reg;
2628d0641163SNithin Dabilpuram
2629d0641163SNithin Dabilpuram for (idx = 0; idx < req->num_regs; idx++) {
2630d0641163SNithin Dabilpuram reg = req->reg[idx];
2631d0641163SNithin Dabilpuram reg &= NIX_TX_SCHQ_MASK;
2632d0641163SNithin Dabilpuram schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2633d0641163SNithin Dabilpuram if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) ||
2634d0641163SNithin Dabilpuram !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq))
2635d0641163SNithin Dabilpuram return NIX_AF_INVAL_TXSCHQ_CFG;
2636d0641163SNithin Dabilpuram rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg);
2637d0641163SNithin Dabilpuram }
2638d0641163SNithin Dabilpuram rsp->lvl = req->lvl;
2639d0641163SNithin Dabilpuram rsp->num_regs = req->num_regs;
2640d0641163SNithin Dabilpuram return 0;
2641d0641163SNithin Dabilpuram }
2642d0641163SNithin Dabilpuram
/* Enable or disable the TL3_TL2 -> LBK link mapping for every TL2/TL3
 * queue owned by 'pcifunc'.  When enabling, each LBK link is programmed
 * with channel 63 (RVU_SWITCH_LBK_CHAN) and its ENA bit (bit 12) so
 * packets can be steered to LBK with a NPC TX MCAM rule.  Only applies
 * to CGX-mapped PFs.
 */
void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
			struct nix_txsch *txsch, bool enable)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u8 pf = rvu_get_pf(pcifunc);
	int first_lbk, link;
	u64 regval;
	int schq;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	regval = enable ? (BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0;
	first_lbk = hw->cgx_links;

	for (schq = 0; schq < txsch->schq.max; schq++) {
		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
			continue;
		/* Program every LBK link for this queue */
		for (link = hw->lbk_links - 1; link >= 0; link--)
			rvu_write64(rvu, blkaddr,
				    NIX_AF_TL3_TL2X_LINKX_CFG(schq,
							      first_lbk + link),
				    regval);
	}
}
2672fa2bf6baSSubbaraya Sundeep
/* Mailbox handler: read or write NIX transmit scheduler registers on
 * behalf of a PF/VF.
 *
 * Reads are delegated to nix_txschq_cfg_read().  For writes, each
 * register is validated against the hierarchy and shaping capabilities,
 * merged with the current value via regval_mask, and special-cased for:
 * shaper-toggle silicon quirks, NIXLF slot translation in SMQ config,
 * stripping BP_ENA when TX link backpressure is unsupported, marking
 * TL1 config done, and splitting SMQ-flush writes so the flush happens
 * before the remaining bits are written.
 *
 * Returns 0 on success or a NIX_AF_* error code.
 */
int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
				    struct nix_txschq_config *req,
				    struct nix_txschq_config *rsp)
{
	u64 reg, val, regval, schq_regbase, val_mask;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	int blkaddr, idx, err;
	int nixlf, schq;
	u32 *pfvf_map;

	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
		return NIX_AF_INVAL_TXSCHQ_CFG;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	if (req->read)
		return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp);

	txsch = &nix_hw->txsch[req->lvl];
	pfvf_map = txsch->pfvf_map;

	/* VFs must not touch shared levels at/above the aggregation
	 * level; for TL1 just apply the defaults on their behalf.
	 */
	if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
	    pcifunc & RVU_PFVF_FUNC_MASK) {
		mutex_lock(&rvu->rsrc_lock);
		if (req->lvl == NIX_TXSCH_LVL_TL1)
			nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
		mutex_unlock(&rvu->rsrc_lock);
		return 0;
	}

	for (idx = 0; idx < req->num_regs; idx++) {
		reg = req->reg[idx];
		reg &= NIX_TX_SCHQ_MASK;
		regval = req->regval[idx];
		schq_regbase = reg & 0xFFFF;
		val_mask = req->regval_mask[idx];

		if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
					       txsch->lvl, reg, regval))
			return NIX_AF_INVAL_TXSCHQ_CFG;

		/* Check if shaping and coloring is supported */
		if (!is_txschq_shaping_valid(hw, req->lvl, reg))
			continue;

		/* Merge requested bits into the current register value:
		 * bits set in val_mask are preserved from hardware.
		 */
		val = rvu_read64(rvu, blkaddr, reg);
		regval = (val & val_mask) | (regval & ~val_mask);

		/* Handle shaping state toggle specially */
		if (hw->cap.nix_shaper_toggle_wait &&
		    handle_txschq_shaper_update(rvu, blkaddr, nixlf,
						req->lvl, reg, regval))
			continue;

		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
					   pcifunc, 0);
			regval &= ~(0x7FULL << 24);
			regval |= ((u64)nixlf << 24);
		}

		/* Clear 'BP_ENA' config, if it's not allowed */
		if (!hw->cap.nix_tx_link_bp) {
			if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
			    (schq_regbase & 0xFF00) ==
			    NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
				regval &= ~BIT_ULL(13);
		}

		/* Mark config as done for TL1 by PF */
		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
			mutex_lock(&rvu->rsrc_lock);
			pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
							NIX_TXSCHQ_CFG_DONE);
			mutex_unlock(&rvu->rsrc_lock);
		}

		/* SMQ flush is special hence split register writes such
		 * that flush first and write rest of the bits later.
		 */
		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
		    (regval & BIT_ULL(49))) {
			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
			nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
			regval &= ~BIT_ULL(49);
		}
		rvu_write64(rvu, blkaddr, reg, regval);
	}

	return 0;
}
2777b279bbb3SSunil Goutham
nix_rx_vtag_cfg(struct rvu * rvu,int nixlf,int blkaddr,struct nix_vtag_config * req)2778d02913d9SVamsi Attunuru static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
2779d02913d9SVamsi Attunuru struct nix_vtag_config *req)
2780d02913d9SVamsi Attunuru {
278186cea61dSTomasz Duszynski u64 regval = req->vtag_size;
2782d02913d9SVamsi Attunuru
2783fd9d7859SHariprasad Kelam if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
2784fd9d7859SHariprasad Kelam req->vtag_size > VTAGSIZE_T8)
2785d02913d9SVamsi Attunuru return -EINVAL;
2786d02913d9SVamsi Attunuru
2787f0c2982aSNaveen Mamindlapalli /* RX VTAG Type 7 reserved for vf vlan */
2788f0c2982aSNaveen Mamindlapalli if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
2789f0c2982aSNaveen Mamindlapalli return NIX_AF_ERR_RX_VTAG_INUSE;
2790f0c2982aSNaveen Mamindlapalli
279186cea61dSTomasz Duszynski if (req->rx.capture_vtag)
279286cea61dSTomasz Duszynski regval |= BIT_ULL(5);
279386cea61dSTomasz Duszynski if (req->rx.strip_vtag)
2794d02913d9SVamsi Attunuru regval |= BIT_ULL(4);
2795d02913d9SVamsi Attunuru
2796d02913d9SVamsi Attunuru rvu_write64(rvu, blkaddr,
2797d02913d9SVamsi Attunuru NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
2798d02913d9SVamsi Attunuru return 0;
2799d02913d9SVamsi Attunuru }
2800d02913d9SVamsi Attunuru
/* Release a TX VTAG default entry back to the free pool after clearing
 * its DATA/CTL registers.  Only the owner ('pcifunc' recorded in
 * entry2pfvf_map) may free the entry.
 *
 * Returns 0 on success or a NIX_AF_* error code.
 */
static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
			    u16 pcifunc, int index)
{
	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
	struct nix_txvlan *vlan;

	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	/* Reject if the entry belongs to someone else */
	vlan = &nix_hw->txvlan;
	if (vlan->entry2pfvf_map[index] != pcifunc)
		return NIX_AF_ERR_PARAM;

	/* Wipe the hardware entry before returning it to the pool */
	rvu_write64(rvu, blkaddr, NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
	rvu_write64(rvu, blkaddr, NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);

	vlan->entry2pfvf_map[index] = 0;
	rvu_free_rsrc(&vlan->rsrc, index);

	return 0;
}
28249a946defSVamsi Attunuru
/* Free every TX VTAG entry owned by 'pcifunc' (e.g. on LF teardown).
 * Silently returns if the PF/VF has no NIX block attached.
 */
static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
{
	struct nix_txvlan *vlan;
	struct nix_hw *nix_hw;
	int blkaddr, idx;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return;

	vlan = &nix_hw->txvlan;

	/* Walk the whole table and release entries mapped to 'pcifunc' */
	mutex_lock(&vlan->rsrc_lock);
	for (idx = 0; idx < vlan->rsrc.max; idx++) {
		if (vlan->entry2pfvf_map[idx] == pcifunc)
			nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx);
	}
	mutex_unlock(&vlan->rsrc_lock);
}
28499a946defSVamsi Attunuru
/* Allocate a TX VTAG default entry and program its DATA/CTL registers
 * with the given tag value and size.
 *
 * Returns the allocated entry index (>= 0) on success or a negative
 * error code.  Ownership bookkeeping (entry2pfvf_map) is done by the
 * caller.
 */
static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
			     u64 vtag, u8 size)
{
	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
	struct nix_txvlan *vlan;
	int idx;
	u64 data;

	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	vlan = &nix_hw->txvlan;

	/* Grab a free entry under the vlan resource lock */
	mutex_lock(&vlan->rsrc_lock);
	idx = rvu_alloc_rsrc(&vlan->rsrc);
	mutex_unlock(&vlan->rsrc_lock);

	if (idx < 0)
		return idx;

	/* NOTE(review): size==0 places the tag in the upper 32 bits of
	 * DATA; nonzero size uses the value as-is — matches HW layout.
	 */
	data = size ? vtag : vtag << 32;

	rvu_write64(rvu, blkaddr, NIX_AF_TX_VTAG_DEFX_DATA(idx), data);
	rvu_write64(rvu, blkaddr, NIX_AF_TX_VTAG_DEFX_CTL(idx), size);

	return idx;
}
28829a946defSVamsi Attunuru
nix_tx_vtag_decfg(struct rvu * rvu,int blkaddr,struct nix_vtag_config * req)28839a946defSVamsi Attunuru static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
28849a946defSVamsi Attunuru struct nix_vtag_config *req)
28859a946defSVamsi Attunuru {
28869a946defSVamsi Attunuru struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
28879a946defSVamsi Attunuru u16 pcifunc = req->hdr.pcifunc;
28889a946defSVamsi Attunuru int idx0 = req->tx.vtag0_idx;
28899a946defSVamsi Attunuru int idx1 = req->tx.vtag1_idx;
28907278c359SNaveen Mamindlapalli struct nix_txvlan *vlan;
2891dd6028a3SColin Ian King int err = 0;
28929a946defSVamsi Attunuru
28937278c359SNaveen Mamindlapalli if (!nix_hw)
28947278c359SNaveen Mamindlapalli return NIX_AF_ERR_INVALID_NIXBLK;
28957278c359SNaveen Mamindlapalli
28967278c359SNaveen Mamindlapalli vlan = &nix_hw->txvlan;
28979a946defSVamsi Attunuru if (req->tx.free_vtag0 && req->tx.free_vtag1)
28989a946defSVamsi Attunuru if (vlan->entry2pfvf_map[idx0] != pcifunc ||
28999a946defSVamsi Attunuru vlan->entry2pfvf_map[idx1] != pcifunc)
29009a946defSVamsi Attunuru return NIX_AF_ERR_PARAM;
29019a946defSVamsi Attunuru
29029a946defSVamsi Attunuru mutex_lock(&vlan->rsrc_lock);
29039a946defSVamsi Attunuru
29049a946defSVamsi Attunuru if (req->tx.free_vtag0) {
29059a946defSVamsi Attunuru err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
29069a946defSVamsi Attunuru if (err)
29079a946defSVamsi Attunuru goto exit;
29089a946defSVamsi Attunuru }
29099a946defSVamsi Attunuru
29109a946defSVamsi Attunuru if (req->tx.free_vtag1)
29119a946defSVamsi Attunuru err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
29129a946defSVamsi Attunuru
29139a946defSVamsi Attunuru exit:
29149a946defSVamsi Attunuru mutex_unlock(&vlan->rsrc_lock);
29159a946defSVamsi Attunuru return err;
29169a946defSVamsi Attunuru }
29179a946defSVamsi Attunuru
nix_tx_vtag_cfg(struct rvu * rvu,int blkaddr,struct nix_vtag_config * req,struct nix_vtag_config_rsp * rsp)29189a946defSVamsi Attunuru static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
29199a946defSVamsi Attunuru struct nix_vtag_config *req,
29209a946defSVamsi Attunuru struct nix_vtag_config_rsp *rsp)
29219a946defSVamsi Attunuru {
29229a946defSVamsi Attunuru struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
29237278c359SNaveen Mamindlapalli struct nix_txvlan *vlan;
29249a946defSVamsi Attunuru u16 pcifunc = req->hdr.pcifunc;
29259a946defSVamsi Attunuru
29267278c359SNaveen Mamindlapalli if (!nix_hw)
29277278c359SNaveen Mamindlapalli return NIX_AF_ERR_INVALID_NIXBLK;
29287278c359SNaveen Mamindlapalli
29297278c359SNaveen Mamindlapalli vlan = &nix_hw->txvlan;
29309a946defSVamsi Attunuru if (req->tx.cfg_vtag0) {
29319a946defSVamsi Attunuru rsp->vtag0_idx =
29329a946defSVamsi Attunuru nix_tx_vtag_alloc(rvu, blkaddr,
29339a946defSVamsi Attunuru req->tx.vtag0, req->vtag_size);
29349a946defSVamsi Attunuru
29359a946defSVamsi Attunuru if (rsp->vtag0_idx < 0)
29369a946defSVamsi Attunuru return NIX_AF_ERR_TX_VTAG_NOSPC;
29379a946defSVamsi Attunuru
29389a946defSVamsi Attunuru vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
29399a946defSVamsi Attunuru }
29409a946defSVamsi Attunuru
29419a946defSVamsi Attunuru if (req->tx.cfg_vtag1) {
29429a946defSVamsi Attunuru rsp->vtag1_idx =
29439a946defSVamsi Attunuru nix_tx_vtag_alloc(rvu, blkaddr,
29449a946defSVamsi Attunuru req->tx.vtag1, req->vtag_size);
29459a946defSVamsi Attunuru
29469a946defSVamsi Attunuru if (rsp->vtag1_idx < 0)
29479a946defSVamsi Attunuru goto err_free;
29489a946defSVamsi Attunuru
29499a946defSVamsi Attunuru vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
29509a946defSVamsi Attunuru }
29519a946defSVamsi Attunuru
29529a946defSVamsi Attunuru return 0;
29539a946defSVamsi Attunuru
29549a946defSVamsi Attunuru err_free:
29559a946defSVamsi Attunuru if (req->tx.cfg_vtag0)
29569a946defSVamsi Attunuru nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
29579a946defSVamsi Attunuru
29589a946defSVamsi Attunuru return NIX_AF_ERR_TX_VTAG_NOSPC;
29599a946defSVamsi Attunuru }
29609a946defSVamsi Attunuru
rvu_mbox_handler_nix_vtag_cfg(struct rvu * rvu,struct nix_vtag_config * req,struct nix_vtag_config_rsp * rsp)2961eac66686SSunil Goutham int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
2962d02913d9SVamsi Attunuru struct nix_vtag_config *req,
29639a946defSVamsi Attunuru struct nix_vtag_config_rsp *rsp)
2964d02913d9SVamsi Attunuru {
2965d02913d9SVamsi Attunuru u16 pcifunc = req->hdr.pcifunc;
2966d02913d9SVamsi Attunuru int blkaddr, nixlf, err;
2967d02913d9SVamsi Attunuru
296852ccbdacSSunil Goutham err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
296952ccbdacSSunil Goutham if (err)
297052ccbdacSSunil Goutham return err;
2971d02913d9SVamsi Attunuru
2972d02913d9SVamsi Attunuru if (req->cfg_type) {
29739a946defSVamsi Attunuru /* rx vtag configuration */
2974d02913d9SVamsi Attunuru err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
2975d02913d9SVamsi Attunuru if (err)
2976d02913d9SVamsi Attunuru return NIX_AF_ERR_PARAM;
2977d02913d9SVamsi Attunuru } else {
29789a946defSVamsi Attunuru /* tx vtag configuration */
29799a946defSVamsi Attunuru if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
29809a946defSVamsi Attunuru (req->tx.free_vtag0 || req->tx.free_vtag1))
29819a946defSVamsi Attunuru return NIX_AF_ERR_PARAM;
29829a946defSVamsi Attunuru
29839a946defSVamsi Attunuru if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
29849a946defSVamsi Attunuru return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
29859a946defSVamsi Attunuru
29869a946defSVamsi Attunuru if (req->tx.free_vtag0 || req->tx.free_vtag1)
29879a946defSVamsi Attunuru return nix_tx_vtag_decfg(rvu, blkaddr, req);
2988d02913d9SVamsi Attunuru }
2989d02913d9SVamsi Attunuru
2990d02913d9SVamsi Attunuru return 0;
2991d02913d9SVamsi Attunuru }
2992d02913d9SVamsi Attunuru
/* Build and submit one MCE (multicast/mirror) admin-queue instruction
 * for entry @mce: RSS op with index 0, destination @pcifunc, link to
 * @next, end-of-list flag @eol.
 *
 * Returns 0 on success or the AQ enqueue error.
 */
static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
			     int mce, u8 op, u16 pcifunc, int next, bool eol)
{
	struct nix_aq_enq_req aq_req;
	int err;

	/* Zero the request so no uninitialized stack data is handed to
	 * the admin queue alongside the fields set below.
	 */
	memset(&aq_req, 0, sizeof(aq_req));

	aq_req.hdr.pcifunc = 0;
	aq_req.ctype = NIX_AQ_CTYPE_MCE;
	aq_req.op = op;
	aq_req.qidx = mce;

	/* Use RSS with RSS index 0 */
	aq_req.mce.op = 1;
	aq_req.mce.index = 0;
	aq_req.mce.eol = eol;
	aq_req.mce.pf_func = pcifunc;
	aq_req.mce.next = next;

	/* All fields valid */
	*(u64 *)(&aq_req.mce_mask) = ~0ULL;

	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
	if (err) {
		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
		return err;
	}
	return 0;
}
302252d3d327SSunil Goutham
/* Add or remove @pcifunc in the software MCE list.
 * Adding an already-present entry is a no-op; removing a missing one
 * is also a no-op. Returns 0 on success, -ENOMEM on allocation failure.
 */
static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
				     u16 pcifunc, bool add)
{
	struct mce *mce, *tail = NULL;
	bool delete = false;

	/* Look for an existing entry, remembering the list tail */
	hlist_for_each_entry(mce, &mce_list->head, node) {
		if (mce->pcifunc == pcifunc) {
			if (add)
				return 0;	/* already present */
			delete = true;
			break;
		}
		tail = mce;
	}

	if (delete) {
		hlist_del(&mce->node);
		kfree(mce);
		mce_list->count--;
		return 0;
	}

	if (!add)
		return 0;

	/* Append a new entry at the tail */
	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
	if (!mce)
		return -ENOMEM;
	mce->pcifunc = pcifunc;
	if (tail)
		hlist_add_behind(&mce->node, &tail->node);
	else
		hlist_add_head(&mce->node, &mce_list->head);
	mce_list->count++;
	return 0;
}
30644b05528eSSunil Goutham
/* Add or remove @pcifunc in the given MCE list and mirror the updated
 * list into NIX hardware via AQ WRITEs. If the list becomes empty the
 * associated NPC MCAM entry is disabled instead of programming HW MCEs.
 *
 * Returns 0 on success or a negative error code.
 */
int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
			struct nix_mce_list *mce_list,
			int mce_idx, int mcam_index, bool add)
{
	int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct nix_mcast *mcast;
	struct nix_hw *nix_hw;
	struct mce *mce;

	if (!mce_list)
		return -EINVAL;

	/* Get this PF/VF func's MCE index */
	idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);

	if (idx > (mce_idx + mce_list->max)) {
		dev_err(rvu->dev,
			"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
			__func__, idx, mce_list->max,
			pcifunc >> RVU_PFVF_PF_SHIFT);
		return -EINVAL;
	}

	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mcast = &nix_hw->mcast;
	/* mce_lock serializes both the SW list edit and the HW re-dump */
	mutex_lock(&mcast->mce_lock);

	err = nix_update_mce_list_entry(mce_list, pcifunc, add);
	if (err)
		goto end;

	/* Disable MCAM entry in NPC */
	if (!mce_list->count) {
		npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
		npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
		goto end;
	}

	/* Dump the updated list to HW */
	idx = mce_idx;
	last_idx = idx + mce_list->count - 1;
	hlist_for_each_entry(mce, &mce_list->head, node) {
		if (idx > last_idx)
			break;

		next_idx = idx + 1;
		/* EOL should be set in last MCE */
		err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
					mce->pcifunc, next_idx,
					(next_idx > last_idx) ? true : false);
		if (err)
			goto end;
		idx++;
	}

end:
	mutex_unlock(&mcast->mce_lock);
	return err;
}
31284b05528eSSunil Goutham
/* Resolve the MCE list and its start index for @pcifunc's parent PF,
 * based on the entry @type (broadcast / allmulti / promisc).
 * Yields NULL/0 when RX multicast offload is unsupported, the PF is
 * not CGX mapped, or @type is unknown.
 */
void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
		      struct nix_mce_list **mce_list, int *mce_idx)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_pfvf *pfvf;

	if (!hw->cap.nix_rx_multicast ||
	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
		*mce_list = NULL;
		*mce_idx = 0;
		return;
	}

	/* MCE lists live on the parent PF's pfvf struct */
	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);

	switch (type) {
	case NIXLF_BCAST_ENTRY:
		*mce_list = &pfvf->bcast_mce_list;
		*mce_idx = pfvf->bcast_mce_idx;
		break;
	case NIXLF_ALLMULTI_ENTRY:
		*mce_list = &pfvf->mcast_mce_list;
		*mce_idx = pfvf->mcast_mce_idx;
		break;
	case NIXLF_PROMISC_ENTRY:
		*mce_list = &pfvf->promisc_mce_list;
		*mce_idx = pfvf->promisc_mce_idx;
		break;
	default:
		*mce_list = NULL;
		*mce_idx = 0;
		break;
	}
}
3159967db352SNaveen Mamindlapalli
/* Add or remove @pcifunc from the MCE replication list selected by
 * @type, then refresh the list in hardware. Quietly succeeds for
 * functions that do not participate in multicast replication.
 */
static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
			       int type, bool add)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_mce_list *mce_list;
	int nixlf, blkaddr, mcam_index;
	int mce_idx;

	/* skip multicast pkt replication for AF's VFs & SDP links */
	if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc))
		return 0;

	if (!hw->cap.nix_rx_multicast)
		return 0;

	if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return -EINVAL;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (nixlf < 0)
		return -EINVAL;

	nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);

	/* MCAM entry belongs to the parent PF */
	mcam_index = npc_get_nixlf_mcam_index(mcam,
					      pcifunc & ~RVU_PFVF_FUNC_MASK,
					      nixlf, type);

	return nix_update_mce_list(rvu, pcifunc, mce_list,
				   mce_idx, mcam_index, add);
}
3197967db352SNaveen Mamindlapalli
/* Pre-create broadcast, allmulti and promisc MCE lists for every CGX
 * mapped PF (and its VFs) on this NIX block. Dummy HW entries are
 * installed with AQ INIT so that later updates can always use WRITE.
 *
 * Returns 0 on success or the first nix_blk_setup_mce() error.
 */
static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
{
	struct nix_mcast *mcast = &nix_hw->mcast;
	int err, pf, numvfs, idx;
	struct rvu_pfvf *pfvf;
	u16 pcifunc;
	u64 cfg;

	/* Skip PF0 (i.e AF) */
	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		/* If PF is not enabled, nothing to do */
		if (!((cfg >> 20) & 0x01))
			continue;
		/* Get numVFs attached to this PF */
		numvfs = (cfg >> 12) & 0xFF;

		pfvf = &rvu->pf[pf];

		/* This NIX0/1 block mapped to PF ? */
		if (pfvf->nix_blkaddr != nix_hw->blkaddr)
			continue;

		/* save start idx of broadcast mce list */
		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);

		/* save start idx of multicast mce list */
		pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
		nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);

		/* save the start idx of promisc mce list */
		pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
		nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);

		for (idx = 0; idx < (numvfs + 1); idx++) {
			/* idx-0 is for PF, followed by VFs */
			pcifunc = (pf << RVU_PFVF_PF_SHIFT);
			pcifunc |= idx;
			/* Add dummy entries now, so that we don't have to check
			 * for whether AQ_OP should be INIT/WRITE later on.
			 * Will be updated when a NIXLF is attached/detached to
			 * these PF/VFs.
			 */
			err = nix_blk_setup_mce(rvu, nix_hw,
						pfvf->bcast_mce_idx + idx,
						NIX_AQ_INSTOP_INIT,
						pcifunc, 0, true);
			if (err)
				return err;

			/* add dummy entries to multicast mce list */
			err = nix_blk_setup_mce(rvu, nix_hw,
						pfvf->mcast_mce_idx + idx,
						NIX_AQ_INSTOP_INIT,
						pcifunc, 0, true);
			if (err)
				return err;

			/* add dummy entries to promisc mce list */
			err = nix_blk_setup_mce(rvu, nix_hw,
						pfvf->promisc_mce_idx + idx,
						NIX_AQ_INSTOP_INIT,
						pcifunc, 0, true);
			if (err)
				return err;
		}
	}
	return 0;
}
326852d3d327SSunil Goutham
nix_setup_mcast(struct rvu * rvu,struct nix_hw * nix_hw,int blkaddr)326952d3d327SSunil Goutham static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
327052d3d327SSunil Goutham {
327152d3d327SSunil Goutham struct nix_mcast *mcast = &nix_hw->mcast;
327252d3d327SSunil Goutham struct rvu_hwinfo *hw = rvu->hw;
327352d3d327SSunil Goutham int err, size;
327452d3d327SSunil Goutham
327552d3d327SSunil Goutham size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
327652d3d327SSunil Goutham size = (1ULL << size);
327752d3d327SSunil Goutham
327852d3d327SSunil Goutham /* Alloc memory for multicast/mirror replication entries */
327952d3d327SSunil Goutham err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
328052d3d327SSunil Goutham (256UL << MC_TBL_SIZE), size);
328152d3d327SSunil Goutham if (err)
328252d3d327SSunil Goutham return -ENOMEM;
328352d3d327SSunil Goutham
328452d3d327SSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
328552d3d327SSunil Goutham (u64)mcast->mce_ctx->iova);
328652d3d327SSunil Goutham
328752d3d327SSunil Goutham /* Set max list length equal to max no of VFs per PF + PF itself */
328852d3d327SSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
328952d3d327SSunil Goutham BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
329052d3d327SSunil Goutham
329152d3d327SSunil Goutham /* Alloc memory for multicast replication buffers */
329252d3d327SSunil Goutham size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
329352d3d327SSunil Goutham err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
329452d3d327SSunil Goutham (8UL << MC_BUF_CNT), size);
329552d3d327SSunil Goutham if (err)
329652d3d327SSunil Goutham return -ENOMEM;
329752d3d327SSunil Goutham
329852d3d327SSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
329952d3d327SSunil Goutham (u64)mcast->mcast_buf->iova);
330052d3d327SSunil Goutham
330152d3d327SSunil Goutham /* Alloc pkind for NIX internal RX multicast/mirror replay */
330252d3d327SSunil Goutham mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
330352d3d327SSunil Goutham
330452d3d327SSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
330552d3d327SSunil Goutham BIT_ULL(63) | (mcast->replay_pkind << 24) |
330652d3d327SSunil Goutham BIT_ULL(20) | MC_BUF_CNT);
330752d3d327SSunil Goutham
33080964fc8fSStanislaw Kardach mutex_init(&mcast->mce_lock);
330952d3d327SSunil Goutham
3310967db352SNaveen Mamindlapalli return nix_setup_mce_tables(rvu, nix_hw);
331152d3d327SSunil Goutham }
331252d3d327SSunil Goutham
nix_setup_txvlan(struct rvu * rvu,struct nix_hw * nix_hw)33139a946defSVamsi Attunuru static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
33149a946defSVamsi Attunuru {
33159a946defSVamsi Attunuru struct nix_txvlan *vlan = &nix_hw->txvlan;
33169a946defSVamsi Attunuru int err;
33179a946defSVamsi Attunuru
33189a946defSVamsi Attunuru /* Allocate resource bimap for tx vtag def registers*/
33199a946defSVamsi Attunuru vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
33209a946defSVamsi Attunuru err = rvu_alloc_bitmap(&vlan->rsrc);
33219a946defSVamsi Attunuru if (err)
33229a946defSVamsi Attunuru return -ENOMEM;
33239a946defSVamsi Attunuru
33249a946defSVamsi Attunuru /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
33259a946defSVamsi Attunuru vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
33269a946defSVamsi Attunuru sizeof(u16), GFP_KERNEL);
33279a946defSVamsi Attunuru if (!vlan->entry2pfvf_map)
33289a946defSVamsi Attunuru goto free_mem;
33299a946defSVamsi Attunuru
33309a946defSVamsi Attunuru mutex_init(&vlan->rsrc_lock);
33319a946defSVamsi Attunuru return 0;
33329a946defSVamsi Attunuru
33339a946defSVamsi Attunuru free_mem:
33349a946defSVamsi Attunuru kfree(vlan->rsrc.bmap);
33359a946defSVamsi Attunuru return -ENOMEM;
33369a946defSVamsi Attunuru }
33379a946defSVamsi Attunuru
/* Discover the scheduler queue count at every TX scheduling level,
 * build allocation bitmaps and pcifunc maps for them, and program a
 * default DWRR MTU where the silicon supports it.
 *
 * Returns 0 on success or a negative error code.
 */
static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	struct nix_txsch *txsch;
	int err, lvl, schq;
	u64 cfg, reg;

	/* Get scheduler queue count of each type and alloc
	 * bitmap for each for alloc/free/attach operations.
	 */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];
		txsch->lvl = lvl;
		/* Pick the CONST register that reports this level's count */
		switch (lvl) {
		case NIX_TXSCH_LVL_SMQ:
			reg = NIX_AF_MDQ_CONST;
			break;
		case NIX_TXSCH_LVL_TL4:
			reg = NIX_AF_TL4_CONST;
			break;
		case NIX_TXSCH_LVL_TL3:
			reg = NIX_AF_TL3_CONST;
			break;
		case NIX_TXSCH_LVL_TL2:
			reg = NIX_AF_TL2_CONST;
			break;
		case NIX_TXSCH_LVL_TL1:
			reg = NIX_AF_TL1_CONST;
			break;
		}
		cfg = rvu_read64(rvu, blkaddr, reg);
		txsch->schq.max = cfg & 0xFFFF;
		err = rvu_alloc_bitmap(&txsch->schq);
		if (err)
			return err;

		/* Allocate memory for scheduler queues to
		 * PF/VF pcifunc mapping info.
		 */
		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
					       sizeof(u32), GFP_KERNEL);
		if (!txsch->pfvf_map)
			return -ENOMEM;
		/* Mark every queue free until a PF/VF claims it */
		for (schq = 0; schq < txsch->schq.max; schq++)
			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
	}

	/* Setup a default value of 8192 as DWRR MTU */
	if (rvu->hw->cap.nix_common_dwrr_mtu ||
	    rvu->hw->cap.nix_multiple_dwrr_mtu) {
		rvu_write64(rvu, blkaddr,
			    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM),
			    convert_bytes_to_dwrr_mtu(8192));
		rvu_write64(rvu, blkaddr,
			    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK),
			    convert_bytes_to_dwrr_mtu(8192));
		rvu_write64(rvu, blkaddr,
			    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP),
			    convert_bytes_to_dwrr_mtu(8192));
	}

	return 0;
}
3400709a4f0cSSunil Goutham
/* Find or reserve a NIX mark-format slot for @cfg. An existing slot
 * with the same config is reused; otherwise the next free slot is
 * programmed. Returns the slot index, or -ERANGE if the table is full.
 */
int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
				int blkaddr, u32 cfg)
{
	int i;

	/* Reuse an already-programmed format when configs match */
	for (i = 0; i < nix_hw->mark_format.in_use; i++)
		if (nix_hw->mark_format.cfg[i] == cfg)
			return i;

	if (nix_hw->mark_format.in_use >= nix_hw->mark_format.total)
		return -ERANGE;

	rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(i), cfg);
	nix_hw->mark_format.cfg[i] = cfg;
	nix_hw->mark_format.in_use++;
	return i;
}
3418a27d7659SKrzysztof Kanas
nix_af_mark_format_setup(struct rvu * rvu,struct nix_hw * nix_hw,int blkaddr)3419a27d7659SKrzysztof Kanas static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
3420a27d7659SKrzysztof Kanas int blkaddr)
3421a27d7659SKrzysztof Kanas {
3422a27d7659SKrzysztof Kanas u64 cfgs[] = {
3423a27d7659SKrzysztof Kanas [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003,
3424a27d7659SKrzysztof Kanas [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200,
3425a27d7659SKrzysztof Kanas [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203,
3426a27d7659SKrzysztof Kanas [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c,
3427a27d7659SKrzysztof Kanas [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00,
3428a27d7659SKrzysztof Kanas [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c,
3429a27d7659SKrzysztof Kanas [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008,
3430a27d7659SKrzysztof Kanas [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800,
3431a27d7659SKrzysztof Kanas [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
3432a27d7659SKrzysztof Kanas };
3433a27d7659SKrzysztof Kanas int i, rc;
3434a27d7659SKrzysztof Kanas u64 total;
3435a27d7659SKrzysztof Kanas
3436a27d7659SKrzysztof Kanas total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
3437a27d7659SKrzysztof Kanas nix_hw->mark_format.total = (u8)total;
3438a27d7659SKrzysztof Kanas nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
3439a27d7659SKrzysztof Kanas GFP_KERNEL);
3440a27d7659SKrzysztof Kanas if (!nix_hw->mark_format.cfg)
3441a27d7659SKrzysztof Kanas return -ENOMEM;
3442a27d7659SKrzysztof Kanas for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
3443a27d7659SKrzysztof Kanas rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
3444a27d7659SKrzysztof Kanas if (rc < 0)
3445a27d7659SKrzysztof Kanas dev_err(rvu->dev, "Err %d in setup mark format %d\n",
3446a27d7659SKrzysztof Kanas i, rc);
3447a27d7659SKrzysztof Kanas }
3448a27d7659SKrzysztof Kanas
3449a27d7659SKrzysztof Kanas return 0;
3450a27d7659SKrzysztof Kanas }
3451a27d7659SKrzysztof Kanas
/* Report the maximum frame size supported on LBK (loopback) links.
 * A 72 KB LBK FIFO identifies CN10K silicon, which allows a larger
 * max frame size than earlier chips.
 */
static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
{
	*max_mtu = (rvu->hw->lbk_bufsize == 0x12000) ?
			CN10K_LBK_LINK_MAX_FRS : NIC_HW_MAX_FRS;
}
34606e54e1c5SHariprasad Kelam
/* Report the maximum frame size supported on CGX/RPM LMAC links.
 * RPM has a 128 KB FIFO and RPM2 twice that (to accommodate 8 LMACs);
 * either indicates CN10K-class silicon with the larger frame limit.
 */
static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
{
	int fifo_len = rvu_cgx_get_fifolen(rvu);

	*max_mtu = (fifo_len == 0x20000 || fifo_len == 0x40000) ?
			CN10K_LMAC_LINK_MAX_FRS : NIC_HW_MAX_FRS;
}
34736e54e1c5SHariprasad Kelam
rvu_mbox_handler_nix_get_hw_info(struct rvu * rvu,struct msg_req * req,struct nix_hw_info * rsp)34746e54e1c5SHariprasad Kelam int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
34756e54e1c5SHariprasad Kelam struct nix_hw_info *rsp)
34766e54e1c5SHariprasad Kelam {
34776e54e1c5SHariprasad Kelam u16 pcifunc = req->hdr.pcifunc;
3478c39830a4SSunil Goutham u64 dwrr_mtu;
34796e54e1c5SHariprasad Kelam int blkaddr;
34806e54e1c5SHariprasad Kelam
34816e54e1c5SHariprasad Kelam blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
34826e54e1c5SHariprasad Kelam if (blkaddr < 0)
34836e54e1c5SHariprasad Kelam return NIX_AF_ERR_AF_LF_INVALID;
34846e54e1c5SHariprasad Kelam
34856e54e1c5SHariprasad Kelam if (is_afvf(pcifunc))
34866e54e1c5SHariprasad Kelam rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
34876e54e1c5SHariprasad Kelam else
34886e54e1c5SHariprasad Kelam rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
34896e54e1c5SHariprasad Kelam
34906e54e1c5SHariprasad Kelam rsp->min_mtu = NIC_HW_MIN_FRS;
3491c39830a4SSunil Goutham
3492bbba125eSSunil Goutham if (!rvu->hw->cap.nix_common_dwrr_mtu &&
3493bbba125eSSunil Goutham !rvu->hw->cap.nix_multiple_dwrr_mtu) {
3494c39830a4SSunil Goutham /* Return '1' on OTx2 */
3495c39830a4SSunil Goutham rsp->rpm_dwrr_mtu = 1;
3496c39830a4SSunil Goutham rsp->sdp_dwrr_mtu = 1;
3497bbba125eSSunil Goutham rsp->lbk_dwrr_mtu = 1;
3498c39830a4SSunil Goutham return 0;
3499c39830a4SSunil Goutham }
3500c39830a4SSunil Goutham
3501bbba125eSSunil Goutham /* Return DWRR_MTU for TLx_SCHEDULE[RR_WEIGHT] config */
3502bbba125eSSunil Goutham dwrr_mtu = rvu_read64(rvu, blkaddr,
3503bbba125eSSunil Goutham nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
3504c39830a4SSunil Goutham rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3505c39830a4SSunil Goutham
3506bbba125eSSunil Goutham dwrr_mtu = rvu_read64(rvu, blkaddr,
3507bbba125eSSunil Goutham nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP));
3508c39830a4SSunil Goutham rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3509c39830a4SSunil Goutham
3510bbba125eSSunil Goutham dwrr_mtu = rvu_read64(rvu, blkaddr,
3511bbba125eSSunil Goutham nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK));
3512bbba125eSSunil Goutham rsp->lbk_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3513bbba125eSSunil Goutham
35146e54e1c5SHariprasad Kelam return 0;
35156e54e1c5SHariprasad Kelam }
35166e54e1c5SHariprasad Kelam
rvu_mbox_handler_nix_stats_rst(struct rvu * rvu,struct msg_req * req,struct msg_rsp * rsp)3517eac66686SSunil Goutham int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
351842349661SVamsi Attunuru struct msg_rsp *rsp)
351942349661SVamsi Attunuru {
352042349661SVamsi Attunuru u16 pcifunc = req->hdr.pcifunc;
352152ccbdacSSunil Goutham int i, nixlf, blkaddr, err;
352242349661SVamsi Attunuru u64 stats;
352342349661SVamsi Attunuru
352452ccbdacSSunil Goutham err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
352552ccbdacSSunil Goutham if (err)
352652ccbdacSSunil Goutham return err;
352742349661SVamsi Attunuru
352842349661SVamsi Attunuru /* Get stats count supported by HW */
352942349661SVamsi Attunuru stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
353042349661SVamsi Attunuru
353142349661SVamsi Attunuru /* Reset tx stats */
353242349661SVamsi Attunuru for (i = 0; i < ((stats >> 24) & 0xFF); i++)
353342349661SVamsi Attunuru rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
353442349661SVamsi Attunuru
353542349661SVamsi Attunuru /* Reset rx stats */
353642349661SVamsi Attunuru for (i = 0; i < ((stats >> 32) & 0xFF); i++)
353742349661SVamsi Attunuru rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
353842349661SVamsi Attunuru
353942349661SVamsi Attunuru return 0;
354042349661SVamsi Attunuru }
354142349661SVamsi Attunuru
3542cc96b0e9SSunil Goutham /* Returns the ALG index to be set into NPC_RX_ACTION */
static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
{
	int idx;

	/* Look for an already-programmed algo with this exact config */
	for (idx = 0; idx < nix_hw->flowkey.in_use; idx++) {
		if (nix_hw->flowkey.flowkey[idx] == flow_cfg)
			return idx;
	}

	/* No in-use entry matches this config */
	return -ERANGE;
}
3554cc96b0e9SSunil Goutham
35550207c798SKiran Kumar K /* Mask to match ipv6(NPC_LT_LC_IP6) and ipv6 ext(NPC_LT_LC_IP6_EXT) */
35560207c798SKiran Kumar K #define NPC_LT_LC_IP6_MATCH_MSK ((~(NPC_LT_LC_IP6 ^ NPC_LT_LC_IP6_EXT)) & 0xf)
35578665fb96SSatheesh Paul /* Mask to match both ipv4(NPC_LT_LC_IP) and ipv4 ext(NPC_LT_LC_IP_OPT) */
35588665fb96SSatheesh Paul #define NPC_LT_LC_IP_MATCH_MSK ((~(NPC_LT_LC_IP ^ NPC_LT_LC_IP_OPT)) & 0xf)
35590207c798SKiran Kumar K
/* Translate a NIX_FLOW_KEY_TYPE_* bitmap into the five 64-bit
 * NIX_AF_RX_FLOW_KEY_ALGX_FIELDX descriptors that tell the RSS hashing
 * hardware which packet bytes to feed into the hash.
 *
 * @alg:      output array of FIELDS_PER_ALG field descriptors (cleared here)
 * @flow_cfg: bitmap of NIX_FLOW_KEY_TYPE_* flags; bits 31:28 select
 *            L3/L4 SRC-only / DST-only hashing and are stripped from the
 *            key bitmap itself (see l3_l4_src_dst below)
 *
 * Returns 0 on success, -EINVAL for a NULL @alg, or
 * NIX_AF_ERR_RSS_NOSPC_FIELD when the requested combination does not fit
 * into FIELDS_PER_ALG fields / MAX_KEY_OFF hash-key bytes.
 */
static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
{
	int idx, nr_field, key_off, field_marker, keyoff_marker;
	int max_key_off, max_bit_pos, group_member;
	struct nix_rx_flowkey_alg *field;
	struct nix_rx_flowkey_alg tmp;
	u32 key_type, valid_key;
	u32 l3_l4_src_dst;
	int l4_key_offset = 0;

	if (!alg)
		return -EINVAL;

#define FIELDS_PER_ALG  5
#define MAX_KEY_OFF	40
	/* Clear all fields */
	memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);

	/* Each of the 32 possible flow key algorithm definitions should
	 * fall into above incremental config (except ALG0). Otherwise a
	 * single NPC MCAM entry is not sufficient for supporting RSS.
	 *
	 * If a different definition or combination needed then NPC MCAM
	 * has to be programmed to filter such pkts and it's action should
	 * point to this definition to calculate flowtag or hash.
	 *
	 * The `for loop` goes over _all_ protocol field and the following
	 * variables depicts the state machine forward progress logic.
	 *
	 * keyoff_marker - Enabled when hash byte length needs to be accounted
	 * in field->key_offset update.
	 * field_marker - Enabled when a new field needs to be selected.
	 * group_member - Enabled when protocol is part of a group.
	 */

	/* Last 4 bits (31:28) are reserved to specify SRC, DST
	 * selection for L3, L4 i.e IPV[4,6]_SRC, IPV[4,6]_DST,
	 * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST
	 * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST
	 */
	l3_l4_src_dst = flow_cfg;
	/* Reset these 4 bits, so that these won't be part of key */
	flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK;

	keyoff_marker = 0; max_key_off = 0; group_member = 0;
	nr_field = 0; key_off = 0; field_marker = 1;
	field = &tmp; max_bit_pos = fls(flow_cfg);
	/* One iteration per set bit position; a "field" may span several
	 * iterations when protocols share a descriptor (TCP/UDP/SCTP,
	 * VXLAN/GENEVE groups).
	 */
	for (idx = 0;
	     idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
	     key_off < MAX_KEY_OFF; idx++) {
		key_type = BIT(idx);
		valid_key = flow_cfg & key_type;
		/* Found a field marker, reset the field values */
		if (field_marker)
			memset(&tmp, 0, sizeof(tmp));

		field_marker = true;
		keyoff_marker = true;
		switch (key_type) {
		case NIX_FLOW_KEY_TYPE_PORT:
			field->sel_chan = true;
			/* This should be set to 1, when SEL_CHAN is set */
			field->bytesm1 = 1;
			break;
		case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
			field->lid = NPC_LID_LC;
			field->hdr_offset = 9; /* offset */
			field->bytesm1 = 0; /* 1 byte */
			field->ltype_match = NPC_LT_LC_IP;
			field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK;
			break;
		case NIX_FLOW_KEY_TYPE_IPV4:
		case NIX_FLOW_KEY_TYPE_INNR_IPV4:
			field->lid = NPC_LID_LC;
			field->ltype_match = NPC_LT_LC_IP;
			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
				field->lid = NPC_LID_LG;
				field->ltype_match = NPC_LT_LG_TU_IP;
			}
			field->hdr_offset = 12; /* SIP offset */
			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */

			/* Only SIP */
			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
				field->bytesm1 = 3; /* SIP, 4 bytes */

			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
				/* Both SIP + DIP */
				if (field->bytesm1 == 3) {
					field->bytesm1 = 7; /* SIP + DIP, 8B */
				} else {
					/* Only DIP */
					field->hdr_offset = 16; /* DIP off */
					field->bytesm1 = 3; /* DIP, 4 bytes */
				}
			}
			field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK;
			/* IPv4 shares its key-offset slot with IPv6 below */
			keyoff_marker = false;
			break;
		case NIX_FLOW_KEY_TYPE_IPV6:
		case NIX_FLOW_KEY_TYPE_INNR_IPV6:
			field->lid = NPC_LID_LC;
			field->ltype_match = NPC_LT_LC_IP6;
			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
				field->lid = NPC_LID_LG;
				field->ltype_match = NPC_LT_LG_TU_IP6;
			}
			field->hdr_offset = 8; /* SIP offset */
			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */

			/* Only SIP */
			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
				field->bytesm1 = 15; /* SIP, 16 bytes */

			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
				/* Both SIP + DIP */
				if (field->bytesm1 == 15) {
					/* SIP + DIP, 32 bytes */
					field->bytesm1 = 31;
				} else {
					/* Only DIP */
					field->hdr_offset = 24; /* DIP off */
					field->bytesm1 = 15; /* DIP,16 bytes */
				}
			}
			field->ltype_mask = NPC_LT_LC_IP6_MATCH_MSK;
			break;
		case NIX_FLOW_KEY_TYPE_TCP:
		case NIX_FLOW_KEY_TYPE_UDP:
		case NIX_FLOW_KEY_TYPE_SCTP:
		case NIX_FLOW_KEY_TYPE_INNR_TCP:
		case NIX_FLOW_KEY_TYPE_INNR_UDP:
		case NIX_FLOW_KEY_TYPE_INNR_SCTP:
			field->lid = NPC_LID_LD;
			if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
			    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
				field->lid = NPC_LID_LH;
			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */

			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY)
				field->bytesm1 = 1; /* SRC, 2 bytes */

			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) {
				/* Both SRC + DST */
				if (field->bytesm1 == 1) {
					/* SRC + DST, 4 bytes */
					field->bytesm1 = 3;
				} else {
					/* Only DIP */
					field->hdr_offset = 2; /* DST off */
					field->bytesm1 = 1; /* DST, 2 bytes */
				}
			}

			/* Enum values for NPC_LID_LD and NPC_LID_LG are same,
			 * so no need to change the ltype_match, just change
			 * the lid for inner protocols
			 */
			BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
				     (int)NPC_LT_LH_TU_TCP);
			BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
				     (int)NPC_LT_LH_TU_UDP);
			BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
				     (int)NPC_LT_LH_TU_SCTP);

			if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
			     key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
			    valid_key) {
				field->ltype_match |= NPC_LT_LD_TCP;
				group_member = true;
			} else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
				    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
				   valid_key) {
				field->ltype_match |= NPC_LT_LD_UDP;
				group_member = true;
			} else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
				    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
				   valid_key) {
				field->ltype_match |= NPC_LT_LD_SCTP;
				group_member = true;
			}
			field->ltype_mask = ~field->ltype_match;
			if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
				/* Handle the case where any of the group item
				 * is enabled in the group but not the final one
				 */
				if (group_member) {
					valid_key = true;
					group_member = false;
				}
			} else {
				/* Not the last group member yet: keep filling
				 * the same field/key-offset slot.
				 */
				field_marker = false;
				keyoff_marker = false;
			}

			/* TCP/UDP/SCTP and ESP/AH falls at same offset so
			 * remember the TCP key offset of 40 byte hash key.
			 */
			if (key_type == NIX_FLOW_KEY_TYPE_TCP)
				l4_key_offset = key_off;
			break;
		case NIX_FLOW_KEY_TYPE_NVGRE:
			field->lid = NPC_LID_LD;
			field->hdr_offset = 4; /* VSID offset */
			field->bytesm1 = 2;
			field->ltype_match = NPC_LT_LD_NVGRE;
			field->ltype_mask = 0xF;
			break;
		case NIX_FLOW_KEY_TYPE_VXLAN:
		case NIX_FLOW_KEY_TYPE_GENEVE:
			field->lid = NPC_LID_LE;
			field->bytesm1 = 2;
			field->hdr_offset = 4;
			field->ltype_mask = 0xF;
			field_marker = false;
			keyoff_marker = false;

			if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
				field->ltype_match |= NPC_LT_LE_VXLAN;
				group_member = true;
			}

			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
				field->ltype_match |= NPC_LT_LE_GENEVE;
				group_member = true;
			}

			/* GENEVE is the last member of this group: close the
			 * field if any member was requested.
			 */
			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
				if (group_member) {
					field->ltype_mask = ~field->ltype_match;
					field_marker = true;
					keyoff_marker = true;
					valid_key = true;
					group_member = false;
				}
			}
			break;
		case NIX_FLOW_KEY_TYPE_ETH_DMAC:
		case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
			field->lid = NPC_LID_LA;
			field->ltype_match = NPC_LT_LA_ETHER;
			if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
				field->lid = NPC_LID_LF;
				field->ltype_match = NPC_LT_LF_TU_ETHER;
			}
			field->hdr_offset = 0;
			field->bytesm1 = 5; /* DMAC 6 Byte */
			field->ltype_mask = 0xF;
			break;
		case NIX_FLOW_KEY_TYPE_IPV6_EXT:
			field->lid = NPC_LID_LC;
			field->hdr_offset = 40; /* IPV6 hdr */
			field->bytesm1 = 0; /* 1 Byte ext hdr*/
			field->ltype_match = NPC_LT_LC_IP6_EXT;
			field->ltype_mask = 0xF;
			break;
		case NIX_FLOW_KEY_TYPE_GTPU:
			field->lid = NPC_LID_LE;
			field->hdr_offset = 4;
			field->bytesm1 = 3; /* 4 bytes TID*/
			field->ltype_match = NPC_LT_LE_GTPU;
			field->ltype_mask = 0xF;
			break;
		case NIX_FLOW_KEY_TYPE_VLAN:
			field->lid = NPC_LID_LB;
			field->hdr_offset = 2; /* Skip TPID (2-bytes) */
			field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
			field->ltype_match = NPC_LT_LB_CTAG;
			field->ltype_mask = 0xF;
			field->fn_mask = 1; /* Mask out the first nibble */
			break;
		case NIX_FLOW_KEY_TYPE_AH:
		case NIX_FLOW_KEY_TYPE_ESP:
			field->hdr_offset = 0;
			field->bytesm1 = 7; /* SPI + sequence number */
			field->ltype_mask = 0xF;
			field->lid = NPC_LID_LE;
			field->ltype_match = NPC_LT_LE_ESP;
			if (key_type == NIX_FLOW_KEY_TYPE_AH) {
				field->lid = NPC_LID_LD;
				field->ltype_match = NPC_LT_LD_AH;
				field->hdr_offset = 4;
				keyoff_marker = false;
			}
			break;
		}
		field->ena = 1;

		/* Found a valid flow key type */
		if (valid_key) {
			/* Use the key offset of TCP/UDP/SCTP fields
			 * for ESP/AH fields.
			 */
			if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
			    key_type == NIX_FLOW_KEY_TYPE_AH)
				key_off = l4_key_offset;
			field->key_offset = key_off;
			memcpy(&alg[nr_field], field, sizeof(*field));
			max_key_off = max(max_key_off, field->bytesm1 + 1);

			/* Found a field marker, get the next field */
			if (field_marker)
				nr_field++;
		}

		/* Found a keyoff marker, update the new key_off */
		if (keyoff_marker) {
			key_off += max_key_off;
			max_key_off = 0;
		}
	}
	/* Processed all the flow key types */
	if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
		return 0;
	else
		return NIX_AF_ERR_RSS_NOSPC_FIELD;
}
387941a7aa7bSSunil Goutham
/* Reserve the next free RSS flow key algorithm slot, program its five
 * NIX_AF_RX_FLOW_KEY_ALGX_FIELDX registers from @flow_cfg and record the
 * config for later reuse lookups.  Returns the algo index on success or
 * a negative NIX_AF_ERR_* / errno value.
 */
static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
{
	u64 fields[FIELDS_PER_ALG];
	struct nix_hw *nix_hw;
	int alg_idx, fid, err;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	/* All flow key algorithm slots are already taken */
	if (nix_hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
		return NIX_AF_ERR_RSS_NOSPC_ALGO;

	/* Translate flow_cfg into the per-algo field descriptors */
	err = set_flowkey_fields((struct nix_rx_flowkey_alg *)fields, flow_cfg);
	if (err)
		return err;

	alg_idx = nix_hw->flowkey.in_use;

	/* Program the generated fields into the reserved slot */
	for (fid = 0; fid < FIELDS_PER_ALG; fid++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg_idx, fid),
			    fields[fid]);

	/* Remember the config so identical requests can reuse this slot */
	nix_hw->flowkey.flowkey[alg_idx] = flow_cfg;
	nix_hw->flowkey.in_use++;

	return alg_idx;
}
39127ee74697SJerin Jacob
rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu * rvu,struct nix_rss_flowkey_cfg * req,struct nix_rss_flowkey_cfg_rsp * rsp)39137ee74697SJerin Jacob int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
39147ee74697SJerin Jacob struct nix_rss_flowkey_cfg *req,
39157ee74697SJerin Jacob struct nix_rss_flowkey_cfg_rsp *rsp)
39167ee74697SJerin Jacob {
39177ee74697SJerin Jacob u16 pcifunc = req->hdr.pcifunc;
39187ee74697SJerin Jacob int alg_idx, nixlf, blkaddr;
39197ee74697SJerin Jacob struct nix_hw *nix_hw;
392052ccbdacSSunil Goutham int err;
39217ee74697SJerin Jacob
392252ccbdacSSunil Goutham err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
392352ccbdacSSunil Goutham if (err)
392452ccbdacSSunil Goutham return err;
39257ee74697SJerin Jacob
39267ee74697SJerin Jacob nix_hw = get_nix_hw(rvu->hw, blkaddr);
39277ee74697SJerin Jacob if (!nix_hw)
39287278c359SNaveen Mamindlapalli return NIX_AF_ERR_INVALID_NIXBLK;
39297ee74697SJerin Jacob
39307ee74697SJerin Jacob alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
39317ee74697SJerin Jacob /* Failed to get algo index from the exiting list, reserve new */
39327ee74697SJerin Jacob if (alg_idx < 0) {
39337ee74697SJerin Jacob alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
39347ee74697SJerin Jacob req->flowkey_cfg);
39357ee74697SJerin Jacob if (alg_idx < 0)
39367ee74697SJerin Jacob return alg_idx;
39377ee74697SJerin Jacob }
39387ee74697SJerin Jacob rsp->alg_idx = alg_idx;
39397ee74697SJerin Jacob rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
39407ee74697SJerin Jacob alg_idx, req->mcam_index);
39417ee74697SJerin Jacob return 0;
39427ee74697SJerin Jacob }
39437ee74697SJerin Jacob
nix_rx_flowkey_alg_cfg(struct rvu * rvu,int blkaddr)39447ee74697SJerin Jacob static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
39457ee74697SJerin Jacob {
394641a7aa7bSSunil Goutham u32 flowkey_cfg, minkey_cfg;
39477ee74697SJerin Jacob int alg, fid, rc;
394841a7aa7bSSunil Goutham
39497ee74697SJerin Jacob /* Disable all flow key algx fieldx */
3950bd522d68SJerin Jacob for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
395141a7aa7bSSunil Goutham for (fid = 0; fid < FIELDS_PER_ALG; fid++)
395241a7aa7bSSunil Goutham rvu_write64(rvu, blkaddr,
395341a7aa7bSSunil Goutham NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
39547ee74697SJerin Jacob 0);
395541a7aa7bSSunil Goutham }
39567ee74697SJerin Jacob
39577ee74697SJerin Jacob /* IPv4/IPv6 SIP/DIPs */
39587ee74697SJerin Jacob flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
39597ee74697SJerin Jacob rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
39607ee74697SJerin Jacob if (rc < 0)
39617ee74697SJerin Jacob return rc;
39627ee74697SJerin Jacob
39637ee74697SJerin Jacob /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
39647ee74697SJerin Jacob minkey_cfg = flowkey_cfg;
39657ee74697SJerin Jacob flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
39667ee74697SJerin Jacob rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
39677ee74697SJerin Jacob if (rc < 0)
39687ee74697SJerin Jacob return rc;
39697ee74697SJerin Jacob
39707ee74697SJerin Jacob /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
39717ee74697SJerin Jacob flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
39727ee74697SJerin Jacob rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
39737ee74697SJerin Jacob if (rc < 0)
39747ee74697SJerin Jacob return rc;
39757ee74697SJerin Jacob
39767ee74697SJerin Jacob /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
39777ee74697SJerin Jacob flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
39787ee74697SJerin Jacob rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
39797ee74697SJerin Jacob if (rc < 0)
39807ee74697SJerin Jacob return rc;
39817ee74697SJerin Jacob
39827ee74697SJerin Jacob /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
39837ee74697SJerin Jacob flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
39847ee74697SJerin Jacob NIX_FLOW_KEY_TYPE_UDP;
39857ee74697SJerin Jacob rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
39867ee74697SJerin Jacob if (rc < 0)
39877ee74697SJerin Jacob return rc;
39887ee74697SJerin Jacob
39897ee74697SJerin Jacob /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
39907ee74697SJerin Jacob flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
39917ee74697SJerin Jacob NIX_FLOW_KEY_TYPE_SCTP;
39927ee74697SJerin Jacob rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
39937ee74697SJerin Jacob if (rc < 0)
39947ee74697SJerin Jacob return rc;
39957ee74697SJerin Jacob
39967ee74697SJerin Jacob /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
39977ee74697SJerin Jacob flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
39987ee74697SJerin Jacob NIX_FLOW_KEY_TYPE_SCTP;
39997ee74697SJerin Jacob rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
40007ee74697SJerin Jacob if (rc < 0)
40017ee74697SJerin Jacob return rc;
40027ee74697SJerin Jacob
40037ee74697SJerin Jacob /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
40047ee74697SJerin Jacob flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
40057ee74697SJerin Jacob NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
40067ee74697SJerin Jacob rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
40077ee74697SJerin Jacob if (rc < 0)
40087ee74697SJerin Jacob return rc;
40097ee74697SJerin Jacob
40107ee74697SJerin Jacob return 0;
401141a7aa7bSSunil Goutham }
401241a7aa7bSSunil Goutham
/* Mbox handler: set the MAC address for the requesting PF/VF's NIX LF
 * and install a matching unicast NPC entry on its RX channel base.
 *
 * Untrusted-VF policy: if the admin (PF) has pinned a MAC for this VF
 * (PF_SET_VF_MAC) and the VF is not marked trusted, the VF request is
 * rejected with -EPERM. For a trusted VF the new MAC is also recorded
 * as the function's default MAC.
 */
int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
				      struct nix_set_mac_addr *req,
				      struct msg_rsp *rsp)
{
	/* Non-zero FUNC bits in pcifunc => request came from a VF */
	bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* untrusted VF can't overwrite admin(PF) changes */
	if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
	    (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
		dev_warn(rvu->dev,
			 "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
		return -EPERM;
	}

	ether_addr_copy(pfvf->mac_addr, req->mac_addr);

	/* Re-install the unicast NPC match entry with the new MAC */
	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
				    pfvf->rx_chan_base, req->mac_addr);

	/* Trusted VF: remember this as the function's default MAC */
	if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
		ether_addr_copy(pfvf->default_mac, req->mac_addr);

	/* Refresh switching rules for this PF_FUNC after the MAC change */
	rvu_switch_update_rules(rvu, pcifunc);

	return 0;
}
40486f03cf10SSunil Goutham
rvu_mbox_handler_nix_get_mac_addr(struct rvu * rvu,struct msg_req * req,struct nix_get_mac_addr_rsp * rsp)404934bfe0ebSSunil Goutham int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
405034bfe0ebSSunil Goutham struct msg_req *req,
405134bfe0ebSSunil Goutham struct nix_get_mac_addr_rsp *rsp)
405234bfe0ebSSunil Goutham {
405334bfe0ebSSunil Goutham u16 pcifunc = req->hdr.pcifunc;
405434bfe0ebSSunil Goutham struct rvu_pfvf *pfvf;
405534bfe0ebSSunil Goutham
405634bfe0ebSSunil Goutham if (!is_nixlf_attached(rvu, pcifunc))
405734bfe0ebSSunil Goutham return NIX_AF_ERR_AF_LF_INVALID;
405834bfe0ebSSunil Goutham
405934bfe0ebSSunil Goutham pfvf = rvu_get_pfvf(rvu, pcifunc);
406034bfe0ebSSunil Goutham
406134bfe0ebSSunil Goutham ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
406234bfe0ebSSunil Goutham
406334bfe0ebSSunil Goutham return 0;
406434bfe0ebSSunil Goutham }
406534bfe0ebSSunil Goutham
/* Mbox handler: configure RX mode (promisc/allmulti) for the requester's
 * NIX LF.
 *
 * Two mechanisms exist depending on HW capability and the request:
 *  - multicast/promisc packet replication (MCE) lists, used when the HW
 *    supports nix_rx_multicast AND the requester set NIX_RX_MODE_USE_MCE;
 *  - dedicated NPC allmulti/promisc match entries per LF.
 *
 * Policy: promisc/allmulti from a VF is silently ignored (returns 0)
 * when replication is unavailable, and likewise for untrusted VFs.
 */
int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
				     struct msg_rsp *rsp)
{
	bool allmulti, promisc, nix_rx_multicast;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int nixlf, err;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
	allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
	pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;

	/* Replication usable only if HW supports it AND requester opted in */
	nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;

	if (is_vf(pcifunc) && !nix_rx_multicast &&
	    (promisc || allmulti)) {
		dev_warn_ratelimited(rvu->dev,
				     "VF promisc/multicast not supported\n");
		return 0;
	}

	/* untrusted VF can't configure promisc/allmulti */
	if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
	    (promisc || allmulti))
		return 0;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
	if (err)
		return err;

	if (nix_rx_multicast) {
		/* add/del this PF_FUNC to/from mcast pkt replication list */
		err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
					  allmulti);
		if (err) {
			dev_err(rvu->dev,
				"Failed to update pcifunc 0x%x to multicast list\n",
				pcifunc);
			return err;
		}

		/* add/del this PF_FUNC to/from promisc pkt replication list */
		err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
					  promisc);
		if (err) {
			dev_err(rvu->dev,
				"Failed to update pcifunc 0x%x to promisc list\n",
				pcifunc);
			return err;
		}
	}

	/* install/uninstall allmulti entry */
	if (allmulti) {
		rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
					       pfvf->rx_chan_base);
	} else {
		/* Keep the entry when MCE replication handles multicast */
		if (!nix_rx_multicast)
			rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
	}

	/* install/uninstall promisc entry */
	if (promisc)
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);
	else
		if (!nix_rx_multicast)
			rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);

	return 0;
}
4139d6f092caSSunil Goutham
/* Record the requester's min/max frame lengths and then widen
 * req->maxlen / req->minlen so they cover the requesting function's PF
 * and every VF of that PF (all of which share the same RX link).
 */
static void nix_find_link_frs(struct rvu *rvu,
			      struct nix_frs_cfg *req, u16 pcifunc)
{
	int pf = rvu_get_pf(pcifunc);
	struct rvu_pfvf *pfvf;
	int maxlen, minlen;
	int numvfs, hwvf;
	int idx;

	/* Update with requester's min/max lengths */
	pfvf = rvu_get_pfvf(rvu, pcifunc);
	pfvf->maxlen = req->maxlen;
	if (req->update_minlen)
		pfvf->minlen = req->minlen;

	maxlen = req->maxlen;
	minlen = req->update_minlen ? req->minlen : 0;

	/* Get this PF's numVFs and starting hwvf */
	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);

	/* Widen the range over every VF of this PF, then the PF itself
	 * (idx == numvfs selects the PF entry).
	 */
	for (idx = 0; idx <= numvfs; idx++) {
		pfvf = (idx == numvfs) ? &rvu->pf[pf] :
					 &rvu->hwvf[hwvf + idx];
		if (pfvf->maxlen > maxlen)
			maxlen = pfvf->maxlen;
		if (req->update_minlen &&
		    pfvf->minlen && pfvf->minlen < minlen)
			minlen = pfvf->minlen;
	}

	/* Update the request with max/min PF's and it's VF's max/min */
	req->maxlen = maxlen;
	if (req->update_minlen)
		req->minlen = minlen;
}
41849b7dd87aSSunil Goutham
/* Mbox handler: program min/max HW frame sizes (FRS) on the RX link
 * used by the requesting PF/VF.
 *
 * The target link is resolved in priority order: the SDP link when
 * req->sdp_link is set, else the CGX/RPM LMAC link for CGX-mapped PFs,
 * else the LBK link for VFs of PF0. Before writing, the requested
 * values are widened via nix_find_link_frs() to cover all functions
 * sharing the link.
 */
int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
				    struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(pcifunc);
	int blkaddr, link = -1;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	u8 cgx = 0, lmac = 0;
	u16 max_mtu;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	/* Max MTU limit differs between LBK (AF's VFs) and CGX/RPM links */
	if (is_afvf(pcifunc))
		rvu_get_lbk_link_max_frs(rvu, &max_mtu);
	else
		rvu_get_lmac_link_max_frs(rvu, &max_mtu);

	if (!req->sdp_link && req->maxlen > max_mtu)
		return NIX_AF_ERR_FRS_INVALID;

	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
		return NIX_AF_ERR_FRS_INVALID;

	/* Check if config is for SDP link */
	if (req->sdp_link) {
		if (!hw->sdp_links)
			return NIX_AF_ERR_RX_LINK_INVALID;
		/* SDP link index follows all CGX and LBK links */
		link = hw->cgx_links + hw->lbk_links;
		goto linkcfg;
	}

	/* Check if the request is from CGX mapped RVU PF */
	if (is_pf_cgxmapped(rvu, pf)) {
		/* Get CGX and LMAC to which this PF is mapped and find link */
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
		link = (cgx * hw->lmac_per_cgx) + lmac;
	} else if (pf == 0) {
		/* For VFs of PF0 ingress is LBK port, so config LBK link */
		pfvf = rvu_get_pfvf(rvu, pcifunc);
		link = hw->cgx_links + pfvf->lbkid;
	}

	if (link < 0)
		return NIX_AF_ERR_RX_LINK_INVALID;

linkcfg:
	/* Widen req->maxlen/minlen to cover all functions on this link */
	nix_find_link_frs(rvu, req, pcifunc);

	/* NIX_AF_RX_LINKX_CFG: maxlen in bits [31:16], minlen in [15:0] */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
	if (req->update_minlen)
		cfg = (cfg & ~0xFFFFULL) | req->minlen;
	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);

	return 0;
}
42509b7dd87aSSunil Goutham
rvu_mbox_handler_nix_set_rx_cfg(struct rvu * rvu,struct nix_rx_cfg * req,struct msg_rsp * rsp)4251159a8a67SVidhya Raman int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
4252159a8a67SVidhya Raman struct msg_rsp *rsp)
4253159a8a67SVidhya Raman {
425452ccbdacSSunil Goutham int nixlf, blkaddr, err;
4255159a8a67SVidhya Raman u64 cfg;
4256159a8a67SVidhya Raman
425752ccbdacSSunil Goutham err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
425852ccbdacSSunil Goutham if (err)
425952ccbdacSSunil Goutham return err;
4260159a8a67SVidhya Raman
4261159a8a67SVidhya Raman cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
4262159a8a67SVidhya Raman /* Set the interface configuration */
4263159a8a67SVidhya Raman if (req->len_verify & BIT(0))
4264159a8a67SVidhya Raman cfg |= BIT_ULL(41);
4265159a8a67SVidhya Raman else
4266159a8a67SVidhya Raman cfg &= ~BIT_ULL(41);
4267159a8a67SVidhya Raman
4268159a8a67SVidhya Raman if (req->len_verify & BIT(1))
4269159a8a67SVidhya Raman cfg |= BIT_ULL(40);
4270159a8a67SVidhya Raman else
4271159a8a67SVidhya Raman cfg &= ~BIT_ULL(40);
4272159a8a67SVidhya Raman
42734ed6387aSNithin Dabilpuram if (req->len_verify & NIX_RX_DROP_RE)
42744ed6387aSNithin Dabilpuram cfg |= BIT_ULL(32);
42754ed6387aSNithin Dabilpuram else
42764ed6387aSNithin Dabilpuram cfg &= ~BIT_ULL(32);
42774ed6387aSNithin Dabilpuram
4278159a8a67SVidhya Raman if (req->csum_verify & BIT(0))
4279159a8a67SVidhya Raman cfg |= BIT_ULL(37);
4280159a8a67SVidhya Raman else
4281159a8a67SVidhya Raman cfg &= ~BIT_ULL(37);
4282159a8a67SVidhya Raman
4283159a8a67SVidhya Raman rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
4284159a8a67SVidhya Raman
4285159a8a67SVidhya Raman return 0;
4286159a8a67SVidhya Raman }
4287159a8a67SVidhya Raman
rvu_get_lbk_link_credits(struct rvu * rvu,u16 lbk_max_frs)42886e54e1c5SHariprasad Kelam static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
42896e54e1c5SHariprasad Kelam {
42906e54e1c5SHariprasad Kelam return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
42916e54e1c5SHariprasad Kelam }
42926e54e1c5SHariprasad Kelam
nix_link_config(struct rvu * rvu,int blkaddr,struct nix_hw * nix_hw)42931c74b891SNithin Dabilpuram static void nix_link_config(struct rvu *rvu, int blkaddr,
42941c74b891SNithin Dabilpuram struct nix_hw *nix_hw)
42959b7dd87aSSunil Goutham {
42969b7dd87aSSunil Goutham struct rvu_hwinfo *hw = rvu->hw;
42979b7dd87aSSunil Goutham int cgx, lmac_cnt, slink, link;
42986e54e1c5SHariprasad Kelam u16 lbk_max_frs, lmac_max_frs;
4299459f326eSSunil Goutham unsigned long lmac_bmap;
43001c74b891SNithin Dabilpuram u64 tx_credits, cfg;
4301459f326eSSunil Goutham u64 lmac_fifo_len;
4302459f326eSSunil Goutham int iter;
43039b7dd87aSSunil Goutham
43046e54e1c5SHariprasad Kelam rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
43056e54e1c5SHariprasad Kelam rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
43066e54e1c5SHariprasad Kelam
43079b7dd87aSSunil Goutham /* Set default min/max packet lengths allowed on NIX Rx links.
43089b7dd87aSSunil Goutham *
43099b7dd87aSSunil Goutham * With HW reset minlen value of 60byte, HW will treat ARP pkts
43109b7dd87aSSunil Goutham * as undersize and report them to SW as error pkts, hence
43119b7dd87aSSunil Goutham * setting it to 40 bytes.
43129b7dd87aSSunil Goutham */
43136e54e1c5SHariprasad Kelam for (link = 0; link < hw->cgx_links; link++) {
43149b7dd87aSSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
43156e54e1c5SHariprasad Kelam ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
43169b7dd87aSSunil Goutham }
43179b7dd87aSSunil Goutham
43186e54e1c5SHariprasad Kelam for (link = hw->cgx_links; link < hw->lbk_links; link++) {
43196e54e1c5SHariprasad Kelam rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
43206e54e1c5SHariprasad Kelam ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
43216e54e1c5SHariprasad Kelam }
43229b7dd87aSSunil Goutham if (hw->sdp_links) {
43239b7dd87aSSunil Goutham link = hw->cgx_links + hw->lbk_links;
43249b7dd87aSSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
43259b7dd87aSSunil Goutham SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
43269b7dd87aSSunil Goutham }
43279b7dd87aSSunil Goutham
432800efd99eSNithin Dabilpuram /* Get MCS external bypass status for CN10K-B */
432900efd99eSNithin Dabilpuram if (mcs_get_blkcnt() == 1) {
433000efd99eSNithin Dabilpuram /* Adjust for 2 credits when external bypass is disabled */
433100efd99eSNithin Dabilpuram nix_hw->cc_mcs_cnt = is_mcs_bypass(0) ? 0 : 2;
433200efd99eSNithin Dabilpuram }
433300efd99eSNithin Dabilpuram
43349b7dd87aSSunil Goutham /* Set credits for Tx links assuming max packet length allowed.
43359b7dd87aSSunil Goutham * This will be reconfigured based on MTU set for PF/VF.
43369b7dd87aSSunil Goutham */
43379b7dd87aSSunil Goutham for (cgx = 0; cgx < hw->cgx; cgx++) {
43389b7dd87aSSunil Goutham lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
43391c74b891SNithin Dabilpuram /* Skip when cgx is not available or lmac cnt is zero */
43401c74b891SNithin Dabilpuram if (lmac_cnt <= 0)
43411c74b891SNithin Dabilpuram continue;
4342459f326eSSunil Goutham slink = cgx * hw->lmac_per_cgx;
4343459f326eSSunil Goutham
4344459f326eSSunil Goutham /* Get LMAC id's from bitmap */
4345459f326eSSunil Goutham lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
4346f2e664adSRakesh Babu Saladi for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
4347459f326eSSunil Goutham lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
4348459f326eSSunil Goutham if (!lmac_fifo_len) {
4349459f326eSSunil Goutham dev_err(rvu->dev,
4350459f326eSSunil Goutham "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
4351459f326eSSunil Goutham __func__, cgx, iter);
4352459f326eSSunil Goutham continue;
4353459f326eSSunil Goutham }
4354459f326eSSunil Goutham tx_credits = (lmac_fifo_len - lmac_max_frs) / 16;
43559b7dd87aSSunil Goutham /* Enable credits and set credit pkt count to max allowed */
43561c74b891SNithin Dabilpuram cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
435700efd99eSNithin Dabilpuram cfg |= FIELD_PREP(NIX_AF_LINKX_MCS_CNT_MASK, nix_hw->cc_mcs_cnt);
4358459f326eSSunil Goutham
4359459f326eSSunil Goutham link = iter + slink;
43601c74b891SNithin Dabilpuram nix_hw->tx_credits[link] = tx_credits;
43619b7dd87aSSunil Goutham rvu_write64(rvu, blkaddr,
43621c74b891SNithin Dabilpuram NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
43639b7dd87aSSunil Goutham }
43649b7dd87aSSunil Goutham }
43659b7dd87aSSunil Goutham
43669b7dd87aSSunil Goutham /* Set Tx credits for LBK link */
43679b7dd87aSSunil Goutham slink = hw->cgx_links;
43689b7dd87aSSunil Goutham for (link = slink; link < (slink + hw->lbk_links); link++) {
43696e54e1c5SHariprasad Kelam tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
43701c74b891SNithin Dabilpuram nix_hw->tx_credits[link] = tx_credits;
43719b7dd87aSSunil Goutham /* Enable credits and set credit pkt count to max allowed */
43729b7dd87aSSunil Goutham tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
43739b7dd87aSSunil Goutham rvu_write64(rvu, blkaddr,
43749b7dd87aSSunil Goutham NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
43759b7dd87aSSunil Goutham }
43769b7dd87aSSunil Goutham }
43779b7dd87aSSunil Goutham
/* Run NIX X2P bus calibration and verify all present CGX devices and
 * the LBK block responded.
 *
 * Sets NIX_AF_CFG bit 9 to start calibration, polls NIX_AF_STATUS
 * bit 10 for completion, then checks the per-CGX ready bits (16 + idx)
 * and the LBK ready bit (19). The calibrate bit is cleared again before
 * returning.
 *
 * Returns 0 on success, -EBUSY if any expected device did not respond,
 * or the rvu_poll_reg() error if calibration never completed.
 */
static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
{
	int idx, err;
	u64 status;

	/* Start X2P bus calibration */
	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
	/* Wait for calibration to complete */
	err = rvu_poll_reg(rvu, blkaddr,
			   NIX_AF_STATUS, BIT_ULL(10), false);
	if (err) {
		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
		return err;
	}

	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
	/* Check if CGX devices are ready */
	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
		/* Skip when cgx port is not available */
		if (!rvu_cgx_pdata(idx, rvu) ||
		    (status & (BIT_ULL(16 + idx))))
			continue;
		/* Keep scanning remaining CGXs so all failures are logged */
		dev_err(rvu->dev,
			"CGX%d didn't respond to NIX X2P calibration\n", idx);
		err = -EBUSY;
	}

	/* Check if LBK is ready */
	if (!(status & BIT_ULL(19))) {
		dev_err(rvu->dev,
			"LBK didn't respond to NIX X2P calibration\n");
		err = -EBUSY;
	}

	/* Clear 'calibrate_x2p' bit */
	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
	if (err || (status & 0x3FFULL))
		dev_err(rvu->dev,
			"NIX X2P calibration failed, status 0x%llx\n", status);
	if (err)
		return err;
	return 0;
}
4423aba53d5dSSunil Goutham
/* Initialize the NIX admin queue (AQ): set AQ endianness, configure NDC
 * caching behavior, allocate instruction/result queue memory and program
 * the AQ base and size registers.
 *
 * Returns 0 on success or the rvu_aq_alloc() error.
 */
static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
{
	u64 cfg;
	int err;

	/* Set admin queue endianness */
	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
#ifdef __BIG_ENDIAN
	cfg |= BIT_ULL(8);
	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#else
	cfg &= ~BIT_ULL(8);
	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#endif

	/* Do not bypass NDC cache */
	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
	cfg &= ~0x3FFEULL;
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
	/* Disable caching of SQB aka SQEs */
	cfg |= 0x04ULL;
#endif
	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);

	/* Result structure can be followed by RQ/SQ/CQ context at
	 * RES + 128bytes and a write mask at RES + 256 bytes, depending on
	 * operation type. Alloc sufficient result memory for all operations.
	 */
	err = rvu_aq_alloc(rvu, &block->aq,
			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
	if (err)
		return err;

	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
	rvu_write64(rvu, block->addr,
		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
	return 0;
}
4463aba53d5dSSunil Goutham
rvu_nix_setup_capabilities(struct rvu * rvu,int blkaddr)446476660df2SSunil Goutham static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr)
446576660df2SSunil Goutham {
446676660df2SSunil Goutham struct rvu_hwinfo *hw = rvu->hw;
446776660df2SSunil Goutham u64 hw_const;
446876660df2SSunil Goutham
446976660df2SSunil Goutham hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
447076660df2SSunil Goutham
447176660df2SSunil Goutham /* On OcteonTx2 DWRR quantum is directly configured into each of
447276660df2SSunil Goutham * the transmit scheduler queues. And PF/VF drivers were free to
447376660df2SSunil Goutham * config any value upto 2^24.
447476660df2SSunil Goutham * On CN10K, HW is modified, the quantum configuration at scheduler
447576660df2SSunil Goutham * queues is in terms of weight. And SW needs to setup a base DWRR MTU
447676660df2SSunil Goutham * at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW will do
447776660df2SSunil Goutham * 'DWRR MTU * weight' to get the quantum.
447876660df2SSunil Goutham *
447976660df2SSunil Goutham * Check if HW uses a common MTU for all DWRR quantum configs.
448076660df2SSunil Goutham * On OcteonTx2 this register field is '0'.
448176660df2SSunil Goutham */
4482bbba125eSSunil Goutham if ((((hw_const >> 56) & 0x10) == 0x10) && !(hw_const & BIT_ULL(61)))
448376660df2SSunil Goutham hw->cap.nix_common_dwrr_mtu = true;
4484bbba125eSSunil Goutham
4485bbba125eSSunil Goutham if (hw_const & BIT_ULL(61))
4486bbba125eSSunil Goutham hw->cap.nix_multiple_dwrr_mtu = true;
448776660df2SSunil Goutham }
448876660df2SSunil Goutham
rvu_nix_block_init(struct rvu * rvu,struct nix_hw * nix_hw)4489221f3dffSRakesh Babu static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
4490aba53d5dSSunil Goutham {
449142006910SStanislaw Kardach const struct npc_lt_def_cfg *ltdefs;
4492aba53d5dSSunil Goutham struct rvu_hwinfo *hw = rvu->hw;
4493221f3dffSRakesh Babu int blkaddr = nix_hw->blkaddr;
4494aba53d5dSSunil Goutham struct rvu_block *block;
4495221f3dffSRakesh Babu int err;
4496709a4f0cSSunil Goutham u64 cfg;
4497aba53d5dSSunil Goutham
4498aba53d5dSSunil Goutham block = &hw->block[blkaddr];
4499aba53d5dSSunil Goutham
45005d9b976dSSunil Goutham if (is_rvu_96xx_B0(rvu)) {
45015d9b976dSSunil Goutham /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
4502e12890f4SSunil Goutham * internal state when conditional clocks are turned off.
4503e12890f4SSunil Goutham * Hence enable them.
4504e12890f4SSunil Goutham */
4505e12890f4SSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_CFG,
45065d9b976dSSunil Goutham rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
4507cc10d3eaSGeetha sowjanya }
45085d9b976dSSunil Goutham
45095d9b976dSSunil Goutham /* Set chan/link to backpressure TL3 instead of TL2 */
45105d9b976dSSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
45115d9b976dSSunil Goutham
45125d9b976dSSunil Goutham /* Disable SQ manager's sticky mode operation (set TM6 = 0)
45135d9b976dSSunil Goutham * This sticky mode is known to cause SQ stalls when multiple
45145d9b976dSSunil Goutham * SQs are mapped to same SMQ and transmitting pkts at a time.
45155d9b976dSSunil Goutham */
45165d9b976dSSunil Goutham cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
45175d9b976dSSunil Goutham cfg &= ~BIT_ULL(15);
45185d9b976dSSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
4519e12890f4SSunil Goutham
452042006910SStanislaw Kardach ltdefs = rvu->kpu.lt_def;
4521aba53d5dSSunil Goutham /* Calibrate X2P bus to check if CGX/LBK links are fine */
4522aba53d5dSSunil Goutham err = nix_calibrate_x2p(rvu, blkaddr);
4523aba53d5dSSunil Goutham if (err)
4524aba53d5dSSunil Goutham return err;
4525aba53d5dSSunil Goutham
452676660df2SSunil Goutham /* Setup capabilities of the NIX block */
452776660df2SSunil Goutham rvu_nix_setup_capabilities(rvu, blkaddr);
452876660df2SSunil Goutham
4529aba53d5dSSunil Goutham /* Initialize admin queue */
4530aba53d5dSSunil Goutham err = nix_aq_init(rvu, block);
4531aba53d5dSSunil Goutham if (err)
4532aba53d5dSSunil Goutham return err;
4533aba53d5dSSunil Goutham
4534aba53d5dSSunil Goutham /* Restore CINT timer delay to HW reset values */
4535aba53d5dSSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
4536aba53d5dSSunil Goutham
45372958d17aSHariprasad Kelam cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG);
45382958d17aSHariprasad Kelam
4539a7314371SGeetha sowjanya /* For better performance use NDC TX instead of NDC RX for SQ's SQEs" */
45402958d17aSHariprasad Kelam cfg |= 1ULL;
45412958d17aSHariprasad Kelam if (!is_rvu_otx2(rvu))
45422958d17aSHariprasad Kelam cfg |= NIX_PTP_1STEP_EN;
45432958d17aSHariprasad Kelam
45442958d17aSHariprasad Kelam rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);
4545a7314371SGeetha sowjanya
4546933a01adSGeetha sowjanya if (!is_rvu_otx2(rvu))
4547933a01adSGeetha sowjanya rvu_nix_block_cn10k_init(rvu, nix_hw);
4548933a01adSGeetha sowjanya
4549221f3dffSRakesh Babu if (is_block_implemented(hw, blkaddr)) {
4550221f3dffSRakesh Babu err = nix_setup_txschq(rvu, nix_hw, blkaddr);
4551709a4f0cSSunil Goutham if (err)
4552709a4f0cSSunil Goutham return err;
455352d3d327SSunil Goutham
4554e8e095b3SSunil Goutham err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
4555e8e095b3SSunil Goutham if (err)
4556e8e095b3SSunil Goutham return err;
4557e8e095b3SSunil Goutham
4558221f3dffSRakesh Babu err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
4559a27d7659SKrzysztof Kanas if (err)
4560a27d7659SKrzysztof Kanas return err;
4561a27d7659SKrzysztof Kanas
4562221f3dffSRakesh Babu err = nix_setup_mcast(rvu, nix_hw, blkaddr);
456352d3d327SSunil Goutham if (err)
456452d3d327SSunil Goutham return err;
45656b3321baSSunil Goutham
45669a946defSVamsi Attunuru err = nix_setup_txvlan(rvu, nix_hw);
45679a946defSVamsi Attunuru if (err)
45689a946defSVamsi Attunuru return err;
45699a946defSVamsi Attunuru
4570da5d32e1SNithin Dabilpuram /* Configure segmentation offload formats */
4571221f3dffSRakesh Babu nix_setup_lso(rvu, nix_hw, blkaddr);
4572da5d32e1SNithin Dabilpuram
45737c91a92eSJerin Jacob /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
45746b3321baSSunil Goutham * This helps HW protocol checker to identify headers
45756b3321baSSunil Goutham * and validate length and checksums.
45766b3321baSSunil Goutham */
45776b3321baSSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
457842006910SStanislaw Kardach (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
457942006910SStanislaw Kardach ltdefs->rx_ol2.ltype_mask);
45806b3321baSSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
458142006910SStanislaw Kardach (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
458242006910SStanislaw Kardach ltdefs->rx_oip4.ltype_mask);
45837c91a92eSJerin Jacob rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
458442006910SStanislaw Kardach (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
458542006910SStanislaw Kardach ltdefs->rx_iip4.ltype_mask);
45867c91a92eSJerin Jacob rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
458742006910SStanislaw Kardach (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
458842006910SStanislaw Kardach ltdefs->rx_oip6.ltype_mask);
45897c91a92eSJerin Jacob rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
459042006910SStanislaw Kardach (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
459142006910SStanislaw Kardach ltdefs->rx_iip6.ltype_mask);
45927c91a92eSJerin Jacob rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
459342006910SStanislaw Kardach (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
459442006910SStanislaw Kardach ltdefs->rx_otcp.ltype_mask);
45957c91a92eSJerin Jacob rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
459642006910SStanislaw Kardach (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
459742006910SStanislaw Kardach ltdefs->rx_itcp.ltype_mask);
45987c91a92eSJerin Jacob rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
459942006910SStanislaw Kardach (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
460042006910SStanislaw Kardach ltdefs->rx_oudp.ltype_mask);
46017c91a92eSJerin Jacob rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
460242006910SStanislaw Kardach (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
460342006910SStanislaw Kardach ltdefs->rx_iudp.ltype_mask);
46047c91a92eSJerin Jacob rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
460542006910SStanislaw Kardach (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
460642006910SStanislaw Kardach ltdefs->rx_osctp.ltype_mask);
46077c91a92eSJerin Jacob rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
460842006910SStanislaw Kardach (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
460942006910SStanislaw Kardach ltdefs->rx_isctp.ltype_mask);
461041a7aa7bSSunil Goutham
4611c87e6b13SHarman Kalra if (!is_rvu_otx2(rvu)) {
4612c87e6b13SHarman Kalra /* Enable APAD calculation for other protocols
4613c87e6b13SHarman Kalra * matching APAD0 and APAD1 lt def registers.
4614c87e6b13SHarman Kalra */
4615c87e6b13SHarman Kalra rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
4616c87e6b13SHarman Kalra (ltdefs->rx_apad0.valid << 11) |
4617c87e6b13SHarman Kalra (ltdefs->rx_apad0.lid << 8) |
4618c87e6b13SHarman Kalra (ltdefs->rx_apad0.ltype_match << 4) |
4619c87e6b13SHarman Kalra ltdefs->rx_apad0.ltype_mask);
4620c87e6b13SHarman Kalra rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
4621c87e6b13SHarman Kalra (ltdefs->rx_apad1.valid << 11) |
4622c87e6b13SHarman Kalra (ltdefs->rx_apad1.lid << 8) |
4623c87e6b13SHarman Kalra (ltdefs->rx_apad1.ltype_match << 4) |
4624c87e6b13SHarman Kalra ltdefs->rx_apad1.ltype_mask);
4625c87e6b13SHarman Kalra
4626c87e6b13SHarman Kalra /* Receive ethertype defination register defines layer
4627c87e6b13SHarman Kalra * information in NPC_RESULT_S to identify the Ethertype
4628c87e6b13SHarman Kalra * location in L2 header. Used for Ethertype overwriting
4629c87e6b13SHarman Kalra * in inline IPsec flow.
4630c87e6b13SHarman Kalra */
4631c87e6b13SHarman Kalra rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
4632c87e6b13SHarman Kalra (ltdefs->rx_et[0].offset << 12) |
4633c87e6b13SHarman Kalra (ltdefs->rx_et[0].valid << 11) |
4634c87e6b13SHarman Kalra (ltdefs->rx_et[0].lid << 8) |
4635c87e6b13SHarman Kalra (ltdefs->rx_et[0].ltype_match << 4) |
4636c87e6b13SHarman Kalra ltdefs->rx_et[0].ltype_mask);
4637c87e6b13SHarman Kalra rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
4638c87e6b13SHarman Kalra (ltdefs->rx_et[1].offset << 12) |
4639c87e6b13SHarman Kalra (ltdefs->rx_et[1].valid << 11) |
4640c87e6b13SHarman Kalra (ltdefs->rx_et[1].lid << 8) |
4641c87e6b13SHarman Kalra (ltdefs->rx_et[1].ltype_match << 4) |
4642c87e6b13SHarman Kalra ltdefs->rx_et[1].ltype_mask);
4643c87e6b13SHarman Kalra }
4644c87e6b13SHarman Kalra
46457ee74697SJerin Jacob err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
46467ee74697SJerin Jacob if (err)
46477ee74697SJerin Jacob return err;
46489b7dd87aSSunil Goutham
46491c74b891SNithin Dabilpuram nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links,
46501c74b891SNithin Dabilpuram sizeof(u64), GFP_KERNEL);
46511c74b891SNithin Dabilpuram if (!nix_hw->tx_credits)
46521c74b891SNithin Dabilpuram return -ENOMEM;
46531c74b891SNithin Dabilpuram
46549b7dd87aSSunil Goutham /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
46551c74b891SNithin Dabilpuram nix_link_config(rvu, blkaddr, nix_hw);
465627150bc4SGeetha sowjanya
465727150bc4SGeetha sowjanya /* Enable Channel backpressure */
465827150bc4SGeetha sowjanya rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
4659709a4f0cSSunil Goutham }
4660aba53d5dSSunil Goutham return 0;
4661aba53d5dSSunil Goutham }
4662aba53d5dSSunil Goutham
rvu_nix_init(struct rvu * rvu)4663221f3dffSRakesh Babu int rvu_nix_init(struct rvu *rvu)
4664aba53d5dSSunil Goutham {
4665aba53d5dSSunil Goutham struct rvu_hwinfo *hw = rvu->hw;
4666221f3dffSRakesh Babu struct nix_hw *nix_hw;
4667221f3dffSRakesh Babu int blkaddr = 0, err;
4668221f3dffSRakesh Babu int i = 0;
4669221f3dffSRakesh Babu
4670221f3dffSRakesh Babu hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
4671221f3dffSRakesh Babu GFP_KERNEL);
4672221f3dffSRakesh Babu if (!hw->nix)
4673221f3dffSRakesh Babu return -ENOMEM;
4674221f3dffSRakesh Babu
4675221f3dffSRakesh Babu blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4676221f3dffSRakesh Babu while (blkaddr) {
4677221f3dffSRakesh Babu nix_hw = &hw->nix[i];
4678221f3dffSRakesh Babu nix_hw->rvu = rvu;
4679221f3dffSRakesh Babu nix_hw->blkaddr = blkaddr;
4680221f3dffSRakesh Babu err = rvu_nix_block_init(rvu, nix_hw);
4681221f3dffSRakesh Babu if (err)
4682221f3dffSRakesh Babu return err;
4683221f3dffSRakesh Babu blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4684221f3dffSRakesh Babu i++;
4685221f3dffSRakesh Babu }
4686221f3dffSRakesh Babu
4687221f3dffSRakesh Babu return 0;
4688221f3dffSRakesh Babu }
4689221f3dffSRakesh Babu
rvu_nix_block_freemem(struct rvu * rvu,int blkaddr,struct rvu_block * block)4690221f3dffSRakesh Babu static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
4691221f3dffSRakesh Babu struct rvu_block *block)
4692221f3dffSRakesh Babu {
4693709a4f0cSSunil Goutham struct nix_txsch *txsch;
469452d3d327SSunil Goutham struct nix_mcast *mcast;
46959a946defSVamsi Attunuru struct nix_txvlan *vlan;
4696709a4f0cSSunil Goutham struct nix_hw *nix_hw;
4697221f3dffSRakesh Babu int lvl;
4698aba53d5dSSunil Goutham
4699aba53d5dSSunil Goutham rvu_aq_free(rvu, block->aq);
4700709a4f0cSSunil Goutham
4701221f3dffSRakesh Babu if (is_block_implemented(rvu->hw, blkaddr)) {
4702709a4f0cSSunil Goutham nix_hw = get_nix_hw(rvu->hw, blkaddr);
4703709a4f0cSSunil Goutham if (!nix_hw)
4704709a4f0cSSunil Goutham return;
4705709a4f0cSSunil Goutham
4706709a4f0cSSunil Goutham for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
4707709a4f0cSSunil Goutham txsch = &nix_hw->txsch[lvl];
4708709a4f0cSSunil Goutham kfree(txsch->schq.bmap);
4709709a4f0cSSunil Goutham }
471052d3d327SSunil Goutham
47111c74b891SNithin Dabilpuram kfree(nix_hw->tx_credits);
47121c74b891SNithin Dabilpuram
471307cccffdSGeetha sowjanya nix_ipolicer_freemem(rvu, nix_hw);
4714e8e095b3SSunil Goutham
47159a946defSVamsi Attunuru vlan = &nix_hw->txvlan;
47169a946defSVamsi Attunuru kfree(vlan->rsrc.bmap);
47179a946defSVamsi Attunuru mutex_destroy(&vlan->rsrc_lock);
47189a946defSVamsi Attunuru
471952d3d327SSunil Goutham mcast = &nix_hw->mcast;
472052d3d327SSunil Goutham qmem_free(rvu->dev, mcast->mce_ctx);
472152d3d327SSunil Goutham qmem_free(rvu->dev, mcast->mcast_buf);
47220964fc8fSStanislaw Kardach mutex_destroy(&mcast->mce_lock);
4723709a4f0cSSunil Goutham }
4724aba53d5dSSunil Goutham }
472540df309eSSunil Goutham
rvu_nix_freemem(struct rvu * rvu)4726221f3dffSRakesh Babu void rvu_nix_freemem(struct rvu *rvu)
4727221f3dffSRakesh Babu {
4728221f3dffSRakesh Babu struct rvu_hwinfo *hw = rvu->hw;
4729221f3dffSRakesh Babu struct rvu_block *block;
4730221f3dffSRakesh Babu int blkaddr = 0;
4731221f3dffSRakesh Babu
4732221f3dffSRakesh Babu blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4733221f3dffSRakesh Babu while (blkaddr) {
4734221f3dffSRakesh Babu block = &hw->block[blkaddr];
4735221f3dffSRakesh Babu rvu_nix_block_freemem(rvu, blkaddr, block);
4736221f3dffSRakesh Babu blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4737221f3dffSRakesh Babu }
4738221f3dffSRakesh Babu }
4739221f3dffSRakesh Babu
rvu_mbox_handler_nix_lf_start_rx(struct rvu * rvu,struct msg_req * req,struct msg_rsp * rsp)474040df309eSSunil Goutham int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
474140df309eSSunil Goutham struct msg_rsp *rsp)
474240df309eSSunil Goutham {
474340df309eSSunil Goutham u16 pcifunc = req->hdr.pcifunc;
4744967db352SNaveen Mamindlapalli struct rvu_pfvf *pfvf;
474540df309eSSunil Goutham int nixlf, err;
474640df309eSSunil Goutham
474752ccbdacSSunil Goutham err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
474840df309eSSunil Goutham if (err)
474940df309eSSunil Goutham return err;
475040df309eSSunil Goutham
475140df309eSSunil Goutham rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
4752a7faa68bSSubbaraya Sundeep
475355307fcbSSubbaraya Sundeep npc_mcam_enable_flows(rvu, pcifunc);
475455307fcbSSubbaraya Sundeep
4755967db352SNaveen Mamindlapalli pfvf = rvu_get_pfvf(rvu, pcifunc);
4756967db352SNaveen Mamindlapalli set_bit(NIXLF_INITIALIZED, &pfvf->flags);
4757967db352SNaveen Mamindlapalli
475823109f8dSSubbaraya Sundeep rvu_switch_update_rules(rvu, pcifunc);
475923109f8dSSubbaraya Sundeep
4760a7faa68bSSubbaraya Sundeep return rvu_cgx_start_stop_io(rvu, pcifunc, true);
476140df309eSSunil Goutham }
476240df309eSSunil Goutham
rvu_mbox_handler_nix_lf_stop_rx(struct rvu * rvu,struct msg_req * req,struct msg_rsp * rsp)476340df309eSSunil Goutham int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
476440df309eSSunil Goutham struct msg_rsp *rsp)
476540df309eSSunil Goutham {
476640df309eSSunil Goutham u16 pcifunc = req->hdr.pcifunc;
4767967db352SNaveen Mamindlapalli struct rvu_pfvf *pfvf;
476840df309eSSunil Goutham int nixlf, err;
476940df309eSSunil Goutham
477052ccbdacSSunil Goutham err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
477140df309eSSunil Goutham if (err)
477240df309eSSunil Goutham return err;
477340df309eSSunil Goutham
4774b6b0e366SSubbaraya Sundeep rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
477555307fcbSSubbaraya Sundeep
4776967db352SNaveen Mamindlapalli pfvf = rvu_get_pfvf(rvu, pcifunc);
4777967db352SNaveen Mamindlapalli clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
4778967db352SNaveen Mamindlapalli
477967a4a258SNaveen Mamindlapalli err = rvu_cgx_start_stop_io(rvu, pcifunc, false);
478067a4a258SNaveen Mamindlapalli if (err)
478167a4a258SNaveen Mamindlapalli return err;
478267a4a258SNaveen Mamindlapalli
478367a4a258SNaveen Mamindlapalli rvu_cgx_tx_enable(rvu, pcifunc, true);
478467a4a258SNaveen Mamindlapalli
478567a4a258SNaveen Mamindlapalli return 0;
478640df309eSSunil Goutham }
4787c554f9c1SGeetha sowjanya
4788149f3b73SSrujana Challa #define RX_SA_BASE GENMASK_ULL(52, 7)
4789149f3b73SSrujana Challa
/* Full teardown of a NIX LF when its PF/VF is detached or reset.
 *
 * The sequence is order-sensitive: MCAM entries are disabled/freed and
 * Rx is synced before HW queue contexts are torn down, and CGX I/O is
 * stopped before the SQ/RQ/CQ contexts are disabled via the AQ.
 */
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct hwctx_disable_req ctx_req;
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	u64 sa_base;
	void *cgxd;
	int err;

	ctx_req.hdr.pcifunc = pcifunc;

	/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
	rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
	nix_interface_deinit(rvu, pcifunc, nixlf);
	nix_rx_sync(rvu, blkaddr);
	nix_txschq_free(rvu, pcifunc);

	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);

	/* Stop CGX traffic before disabling the queue contexts below */
	rvu_cgx_start_stop_io(rvu, pcifunc, false);

	/* Disable any SQ/RQ/CQ contexts the LF still holds; failures are
	 * logged but teardown continues so remaining state is still freed.
	 */
	if (pfvf->sq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "SQ ctx disable failed\n");
	}

	if (pfvf->rq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "RQ ctx disable failed\n");
	}

	if (pfvf->cq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "CQ ctx disable failed\n");
	}

	/* reset HW config done for Switch headers */
	rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT,
			       (PKIND_TX | PKIND_RX), 0, 0, 0, 0);

	/* Disabling CGX and NPC config done for PTP */
	if (pfvf->hw_rx_tstamp_en) {
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
		cgxd = rvu_cgx_pdata(cgx_id, rvu);
		mac_ops = get_mac_ops(cgxd);
		mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false);
		/* Undo NPC config done for PTP */
		if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
			dev_err(rvu->dev, "NPC config for PTP failed\n");
		pfvf->hw_rx_tstamp_en = false;
	}

	/* reset priority flow control config */
	rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0);

	/* reset 802.3x flow control config */
	rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0);

	nix_ctx_free(rvu, pfvf);

	nix_free_all_bandprof(rvu, pcifunc);

	/* If an inline-IPsec SA base was programmed for this LF, flush the
	 * corresponding CPT contexts as well.
	 */
	sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf));
	if (FIELD_GET(RX_SA_BASE, sa_base)) {
		err = rvu_cpt_ctx_flush(rvu, pcifunc);
		if (err)
			dev_err(rvu->dev,
				"CPT ctx flush failed with error: %d\n", err);
	}
}
4869da5d32e1SNithin Dabilpuram
487042157217SZyta Szpak #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
487142157217SZyta Szpak
/* Toggle the PTP timestamping enable bit in a NIX LF's Tx config.
 *
 * Silently succeeds when the PF's LMAC does not advertise the PTP
 * feature. Returns 0 or NIX_AF_ERR_AF_LF_INVALID when the caller has
 * no NIX LF attached.
 */
static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int nixlf, blkaddr;
	u64 txcfg;
	int pf;

	pf = rvu_get_pf(pcifunc);
	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
		return 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Read-modify-write only the PTP enable bit */
	txcfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
	if (enable)
		txcfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
	else
		txcfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), txcfg);

	return 0;
}
490442157217SZyta Szpak
/* Mailbox handler: enable PTP Tx timestamping on the caller's NIX LF. */
int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
}
491042157217SZyta Szpak
/* Mailbox handler: disable PTP Tx timestamping on the caller's NIX LF. */
int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
					   struct msg_rsp *rsp)
{
	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
}
491642157217SZyta Szpak
rvu_mbox_handler_nix_lso_format_cfg(struct rvu * rvu,struct nix_lso_format_cfg * req,struct nix_lso_format_cfg_rsp * rsp)4917da5d32e1SNithin Dabilpuram int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
4918da5d32e1SNithin Dabilpuram struct nix_lso_format_cfg *req,
4919da5d32e1SNithin Dabilpuram struct nix_lso_format_cfg_rsp *rsp)
4920da5d32e1SNithin Dabilpuram {
4921da5d32e1SNithin Dabilpuram u16 pcifunc = req->hdr.pcifunc;
4922da5d32e1SNithin Dabilpuram struct nix_hw *nix_hw;
4923da5d32e1SNithin Dabilpuram struct rvu_pfvf *pfvf;
4924da5d32e1SNithin Dabilpuram int blkaddr, idx, f;
4925da5d32e1SNithin Dabilpuram u64 reg;
4926da5d32e1SNithin Dabilpuram
4927da5d32e1SNithin Dabilpuram pfvf = rvu_get_pfvf(rvu, pcifunc);
4928da5d32e1SNithin Dabilpuram blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
4929da5d32e1SNithin Dabilpuram if (!pfvf->nixlf || blkaddr < 0)
4930da5d32e1SNithin Dabilpuram return NIX_AF_ERR_AF_LF_INVALID;
4931da5d32e1SNithin Dabilpuram
4932da5d32e1SNithin Dabilpuram nix_hw = get_nix_hw(rvu->hw, blkaddr);
4933da5d32e1SNithin Dabilpuram if (!nix_hw)
49347278c359SNaveen Mamindlapalli return NIX_AF_ERR_INVALID_NIXBLK;
4935da5d32e1SNithin Dabilpuram
4936da5d32e1SNithin Dabilpuram /* Find existing matching LSO format, if any */
4937da5d32e1SNithin Dabilpuram for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
4938da5d32e1SNithin Dabilpuram for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
4939da5d32e1SNithin Dabilpuram reg = rvu_read64(rvu, blkaddr,
4940da5d32e1SNithin Dabilpuram NIX_AF_LSO_FORMATX_FIELDX(idx, f));
4941da5d32e1SNithin Dabilpuram if (req->fields[f] != (reg & req->field_mask))
4942da5d32e1SNithin Dabilpuram break;
4943da5d32e1SNithin Dabilpuram }
4944da5d32e1SNithin Dabilpuram
4945da5d32e1SNithin Dabilpuram if (f == NIX_LSO_FIELD_MAX)
4946da5d32e1SNithin Dabilpuram break;
4947da5d32e1SNithin Dabilpuram }
4948da5d32e1SNithin Dabilpuram
4949da5d32e1SNithin Dabilpuram if (idx < nix_hw->lso.in_use) {
4950da5d32e1SNithin Dabilpuram /* Match found */
4951da5d32e1SNithin Dabilpuram rsp->lso_format_idx = idx;
4952da5d32e1SNithin Dabilpuram return 0;
4953da5d32e1SNithin Dabilpuram }
4954da5d32e1SNithin Dabilpuram
4955da5d32e1SNithin Dabilpuram if (nix_hw->lso.in_use == nix_hw->lso.total)
4956da5d32e1SNithin Dabilpuram return NIX_AF_ERR_LSO_CFG_FAIL;
4957da5d32e1SNithin Dabilpuram
4958da5d32e1SNithin Dabilpuram rsp->lso_format_idx = nix_hw->lso.in_use++;
4959da5d32e1SNithin Dabilpuram
4960da5d32e1SNithin Dabilpuram for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
4961da5d32e1SNithin Dabilpuram rvu_write64(rvu, blkaddr,
4962da5d32e1SNithin Dabilpuram NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
4963da5d32e1SNithin Dabilpuram req->fields[f]);
4964da5d32e1SNithin Dabilpuram
4965da5d32e1SNithin Dabilpuram return 0;
4966da5d32e1SNithin Dabilpuram }
49674f88ed2cSHariprasad Kelam
49684b5a3ab1SSrujana Challa #define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48)
49694b5a3ab1SSrujana Challa #define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32)
49704b5a3ab1SSrujana Challa #define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16)
49714b5a3ab1SSrujana Challa #define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0)
49724b5a3ab1SSrujana Challa
49734b5a3ab1SSrujana Challa #define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24)
49744b5a3ab1SSrujana Challa #define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8)
49754b5a3ab1SSrujana Challa #define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0)
49764b5a3ab1SSrujana Challa
49775129bd8eSSrujana Challa #define CPT_INST_CREDIT_TH GENMASK_ULL(53, 32)
49785129bd8eSSrujana Challa #define CPT_INST_CREDIT_BPID GENMASK_ULL(30, 22)
49795129bd8eSSrujana Challa #define CPT_INST_CREDIT_CNT GENMASK_ULL(21, 0)
49805129bd8eSSrujana Challa
/* Program (or clear) one NIX block's inline-IPsec global config:
 * the general config (engine group, opcode, params), the CPT
 * instruction queue selection and the CPT instruction credits.
 */
static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
				 int blkaddr)
{
	u8 cpt_idx, cpt_blkaddr;
	u64 val;

	/* NIX0 uses CPT index 0, NIX1 uses index 1 */
	cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
	if (req->enable) {
		val = 0;
		/* Enable context prefetching */
		if (!is_rvu_otx2(rvu))
			val |= BIT_ULL(51);

		/* Set OPCODE and EGRP */
		val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp);
		val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode);
		val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1);
		val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2);

		rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val);

		/* Set CPT queue for inline IPSec */
		val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot);
		val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC,
				  req->inst_qsel.cpt_pf_func);

		if (!is_rvu_otx2(rvu)) {
			cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 :
						       BLKADDR_CPT1;
			val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr);
		}

		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
			    val);

		/* Set CPT credit */
		/* NOTE(review): when the current credit count (low 22 bits)
		 * is not at the 0x3FFFFF maximum, 0x3FFFFF - val is written
		 * first — this appears to rely on the CREDIT register
		 * accumulating written values to top the count back up
		 * before the requested credit/BPID/threshold is programmed;
		 * confirm against the hardware reference manual.
		 */
		val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
		if ((val & 0x3FFFFF) != 0x3FFFFF)
			rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
				    0x3FFFFF - val);

		val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit);
		val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid);
		val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val);
	} else {
		/* Disable: clear general config and queue selection, and
		 * restore the credit count to maximum if it was drained.
		 */
		rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
			    0x0);
		val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
		if ((val & 0x3FFFFF) != 0x3FFFFF)
			rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
				    0x3FFFFF - val);
	}
}
50364b5a3ab1SSrujana Challa
/* Mailbox handler: apply the inline-IPsec config to NIX0, and to NIX1
 * when a second CPT block is present. A no-op (returns 0) when CPT0 is
 * not implemented on this silicon.
 */
int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
					  struct nix_inline_ipsec_cfg *req,
					  struct msg_rsp *rsp)
{
	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
		return 0;

	nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0);
	if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
		nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1);

	return 0;
}
50504b5a3ab1SSrujana Challa
/* Mailbox handler: read back the current inline-IPsec configuration.
 *
 * Only NIX0's registers are consulted (both NIX blocks are programmed
 * identically by rvu_mbox_handler_nix_inline_ipsec_cfg()). Returns 0
 * with rsp left zeroed when CPT0 is not implemented.
 */
int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu,
					       struct msg_req *req,
					       struct nix_inline_ipsec_cfg *rsp)

{
	u64 val;

	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
		return 0;

	/* Unpack the general config fields */
	val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG);
	rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val);
	rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val);
	rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val);
	rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val);

	/* Unpack the CPT credit register */
	val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0));
	rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val);
	rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val);
	rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val);

	return 0;
}
50745129bd8eSSrujana Challa
/* Mailbox handler: program (or clear) per-LF inline-IPsec Rx config:
 * the CFG0/CFG1 parameter registers and the SA table base address.
 * A no-op when CPT0 is not implemented.
 */
int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
					     struct nix_inline_ipsec_lf_cfg *req,
					     struct msg_rsp *rsp)
{
	int lf, blkaddr, err;
	u64 val;

	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
		return 0;

	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr);
	if (err)
		return err;

	if (req->enable) {
		/* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */
		val = (u64)req->ipsec_cfg0.tt << 44 |
		      (u64)req->ipsec_cfg0.tag_const << 20 |
		      (u64)req->ipsec_cfg0.sa_pow2_size << 16 |
		      req->ipsec_cfg0.lenm1_max;

		/* Bit 46 distinguishes NIX1 on multi-NIX silicon */
		if (blkaddr == BLKADDR_NIX1)
			val |= BIT_ULL(46);

		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val);

		/* Set SA_IDX_W and SA_IDX_MAX */
		val = (u64)req->ipsec_cfg1.sa_idx_w << 32 |
		      req->ipsec_cfg1.sa_idx_max;
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val);

		/* Set SA base address */
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
			    req->sa_base_addr);
	} else {
		/* Disable: zero all three per-LF IPsec registers */
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0);
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0);
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
			    0x0);
	}

	return 0;
}
51185129bd8eSSrujana Challa
rvu_nix_reset_mac(struct rvu_pfvf * pfvf,int pcifunc)51194f88ed2cSHariprasad Kelam void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
51204f88ed2cSHariprasad Kelam {
51214f88ed2cSHariprasad Kelam bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
51224f88ed2cSHariprasad Kelam
51234f88ed2cSHariprasad Kelam /* overwrite vf mac address with default_mac */
51244f88ed2cSHariprasad Kelam if (from_vf)
51254f88ed2cSHariprasad Kelam ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
51264f88ed2cSHariprasad Kelam }
5127e8e095b3SSunil Goutham
5128e8e095b3SSunil Goutham /* NIX ingress policers or bandwidth profiles APIs */
nix_config_rx_pkt_policer_precolor(struct rvu * rvu,int blkaddr)5129e8e095b3SSunil Goutham static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
5130e8e095b3SSunil Goutham {
5131e8e095b3SSunil Goutham struct npc_lt_def_cfg defs, *ltdefs;
5132e8e095b3SSunil Goutham
5133e8e095b3SSunil Goutham ltdefs = &defs;
5134e8e095b3SSunil Goutham memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
5135e8e095b3SSunil Goutham
5136e8e095b3SSunil Goutham /* Extract PCP and DEI fields from outer VLAN from byte offset
5137e8e095b3SSunil Goutham * 2 from the start of LB_PTR (ie TAG).
5138e8e095b3SSunil Goutham * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN
5139e8e095b3SSunil Goutham * fields are considered when 'Tunnel enable' is set in profile.
5140e8e095b3SSunil Goutham */
5141e8e095b3SSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
5142e8e095b3SSunil Goutham (2UL << 12) | (ltdefs->ovlan.lid << 8) |
5143e8e095b3SSunil Goutham (ltdefs->ovlan.ltype_match << 4) |
5144e8e095b3SSunil Goutham ltdefs->ovlan.ltype_mask);
5145e8e095b3SSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
5146e8e095b3SSunil Goutham (2UL << 12) | (ltdefs->ivlan.lid << 8) |
5147e8e095b3SSunil Goutham (ltdefs->ivlan.ltype_match << 4) |
5148e8e095b3SSunil Goutham ltdefs->ivlan.ltype_mask);
5149e8e095b3SSunil Goutham
5150e8e095b3SSunil Goutham /* DSCP field in outer and tunneled IPv4 packets */
5151e8e095b3SSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
5152e8e095b3SSunil Goutham (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
5153e8e095b3SSunil Goutham (ltdefs->rx_oip4.ltype_match << 4) |
5154e8e095b3SSunil Goutham ltdefs->rx_oip4.ltype_mask);
5155e8e095b3SSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
5156e8e095b3SSunil Goutham (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
5157e8e095b3SSunil Goutham (ltdefs->rx_iip4.ltype_match << 4) |
5158e8e095b3SSunil Goutham ltdefs->rx_iip4.ltype_mask);
5159e8e095b3SSunil Goutham
5160e8e095b3SSunil Goutham /* DSCP field (traffic class) in outer and tunneled IPv6 packets */
5161e8e095b3SSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
5162e8e095b3SSunil Goutham (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
5163e8e095b3SSunil Goutham (ltdefs->rx_oip6.ltype_match << 4) |
5164e8e095b3SSunil Goutham ltdefs->rx_oip6.ltype_mask);
5165e8e095b3SSunil Goutham rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
5166e8e095b3SSunil Goutham (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
5167e8e095b3SSunil Goutham (ltdefs->rx_iip6.ltype_match << 4) |
5168e8e095b3SSunil Goutham ltdefs->rx_iip6.ltype_mask);
5169e8e095b3SSunil Goutham }
5170e8e095b3SSunil Goutham
nix_init_policer_context(struct rvu * rvu,struct nix_hw * nix_hw,int layer,int prof_idx)5171e8e095b3SSunil Goutham static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
5172e8e095b3SSunil Goutham int layer, int prof_idx)
5173e8e095b3SSunil Goutham {
5174e8e095b3SSunil Goutham struct nix_cn10k_aq_enq_req aq_req;
5175e8e095b3SSunil Goutham int rc;
5176e8e095b3SSunil Goutham
5177e8e095b3SSunil Goutham memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5178e8e095b3SSunil Goutham
5179e8e095b3SSunil Goutham aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
5180e8e095b3SSunil Goutham aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
5181e8e095b3SSunil Goutham aq_req.op = NIX_AQ_INSTOP_INIT;
5182e8e095b3SSunil Goutham
5183e8e095b3SSunil Goutham /* Context is all zeros, submit to AQ */
5184e8e095b3SSunil Goutham rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5185e8e095b3SSunil Goutham (struct nix_aq_enq_req *)&aq_req, NULL);
5186e8e095b3SSunil Goutham if (rc)
5187e8e095b3SSunil Goutham dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
5188e8e095b3SSunil Goutham layer, prof_idx);
5189e8e095b3SSunil Goutham return rc;
5190e8e095b3SSunil Goutham }
5191e8e095b3SSunil Goutham
/* Probe and set up NIX ingress policers (bandwidth profiles).
 *
 * Reads NIX_AF_CONST to detect policer support, then for each policer
 * layer (leaf/mid/top): reads the per-layer profile count from
 * NIX_AF_PL_CONST, allocates a free-profile bitmap plus per-profile
 * ownership (pfvf_map) and match_id tables, and INITs every profile
 * context in hardware. Finally programs the policer timeunit and the
 * pre-color field extraction.
 *
 * Returns 0 on success or when policers are unsupported; a negative
 * error on allocation or AQ failure. All table allocations are
 * devm-managed, so earlier allocations are not freed on error paths.
 */
static int nix_setup_ipolicers(struct rvu *rvu,
			       struct nix_hw *nix_hw, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_ipolicer *ipolicer;
	int err, layer, prof_idx;
	u64 cfg;

	/* Bit 61 of NIX_AF_CONST gates ingress policer support */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	if (!(cfg & BIT_ULL(61))) {
		hw->cap.ipolicer = false;
		return 0;
	}

	hw->cap.ipolicer = true;
	nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
					sizeof(*ipolicer), GFP_KERNEL);
	if (!nix_hw->ipolicer)
		return -ENOMEM;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		ipolicer = &nix_hw->ipolicer[layer];
		/* NIX_AF_PL_CONST packs profile counts per layer:
		 * leaf in bits [15:0], mid in [31:16], top in [47:32].
		 */
		switch (layer) {
		case BAND_PROF_LEAF_LAYER:
			ipolicer->band_prof.max = cfg & 0XFFFF;
			break;
		case BAND_PROF_MID_LAYER:
			ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF;
			break;
		case BAND_PROF_TOP_LAYER:
			ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF;
			break;
		}

		/* Layer has no profiles on this silicon; skip it */
		if (!ipolicer->band_prof.max)
			continue;

		err = rvu_alloc_bitmap(&ipolicer->band_prof);
		if (err)
			return err;

		/* pfvf_map[prof] holds the owning PCIFUNC (0x00 == AF) */
		ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
						  ipolicer->band_prof.max,
						  sizeof(u16), GFP_KERNEL);
		if (!ipolicer->pfvf_map)
			return -ENOMEM;

		/* match_id[prof] marks leaves for ratelimit aggregation */
		ipolicer->match_id = devm_kcalloc(rvu->dev,
						  ipolicer->band_prof.max,
						  sizeof(u16), GFP_KERNEL);
		if (!ipolicer->match_id)
			return -ENOMEM;

		for (prof_idx = 0;
		     prof_idx < ipolicer->band_prof.max; prof_idx++) {
			/* Set AF as current owner for INIT ops to succeed */
			ipolicer->pfvf_map[prof_idx] = 0x00;

			/* There is no enable bit in the profile context,
			 * so no context disable. So let's INIT them here
			 * so that PF/VF later on have to just do WRITE to
			 * setup policer rates and config.
			 */
			err = nix_init_policer_context(rvu, nix_hw,
						       layer, prof_idx);
			if (err)
				return err;
		}

		/* Allocate memory for maintaining ref_counts for MID level
		 * profiles, this will be needed for leaf layer profiles'
		 * aggregation.
		 */
		if (layer != BAND_PROF_MID_LAYER)
			continue;

		ipolicer->ref_count = devm_kcalloc(rvu->dev,
						   ipolicer->band_prof.max,
						   sizeof(u16), GFP_KERNEL);
		if (!ipolicer->ref_count)
			return -ENOMEM;
	}

	/* Set policer timeunit to 2us ie (19 + 1) * 100 nsec = 2us */
	rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);

	nix_config_rx_pkt_policer_precolor(rvu, blkaddr);

	return 0;
}
5284e8e095b3SSunil Goutham
nix_ipolicer_freemem(struct rvu * rvu,struct nix_hw * nix_hw)528507cccffdSGeetha sowjanya static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw)
5286e8e095b3SSunil Goutham {
5287e8e095b3SSunil Goutham struct nix_ipolicer *ipolicer;
5288e8e095b3SSunil Goutham int layer;
5289e8e095b3SSunil Goutham
529007cccffdSGeetha sowjanya if (!rvu->hw->cap.ipolicer)
529107cccffdSGeetha sowjanya return;
529207cccffdSGeetha sowjanya
5293e8e095b3SSunil Goutham for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5294e8e095b3SSunil Goutham ipolicer = &nix_hw->ipolicer[layer];
5295e8e095b3SSunil Goutham
5296e8e095b3SSunil Goutham if (!ipolicer->band_prof.max)
5297e8e095b3SSunil Goutham continue;
5298e8e095b3SSunil Goutham
5299e8e095b3SSunil Goutham kfree(ipolicer->band_prof.bmap);
5300e8e095b3SSunil Goutham }
5301e8e095b3SSunil Goutham }
5302e8e095b3SSunil Goutham
/* Validate a bandwidth profile AQ request against ownership rules.
 *
 * The profile index must be within the layer's range and owned by the
 * requesting PCIFUNC (AF, pcifunc == 0, may touch any profile). When
 * the request links to a higher layer profile (hl_en set), the link
 * target must be in the next layer up (leaf->mid or mid->top) and also
 * owned by the requester. Returns 0 if valid, -EINVAL otherwise.
 */
static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
			       struct nix_hw *nix_hw, u16 pcifunc)
{
	struct nix_ipolicer *ipolicer;
	int layer, upper, prof_idx;

	/* Bits [15:14] in profile index represent layer */
	layer = (req->qidx >> 14) & 0x03;
	prof_idx = req->qidx & 0x3FFF;

	ipolicer = &nix_hw->ipolicer[layer];
	if (prof_idx >= ipolicer->band_prof.max)
		return -EINVAL;

	/* Only the owning PCIFUNC may access the profile, with the
	 * exception of AF which can read and update any context.
	 */
	if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
		return -EINVAL;

	/* Nothing further to check unless a higher layer link is set */
	if (!req->prof.hl_en)
		return 0;

	/* A leaf layer profile can link only to the mid layer, and a
	 * mid layer profile only to the top layer.
	 */
	switch (layer) {
	case BAND_PROF_LEAF_LAYER:
		upper = BAND_PROF_MID_LAYER;
		break;
	case BAND_PROF_MID_LAYER:
		upper = BAND_PROF_TOP_LAYER;
		break;
	default:
		return -EINVAL;
	}

	/* The link target must exist and belong to the requester too */
	ipolicer = &nix_hw->ipolicer[upper];
	prof_idx = req->prof.band_prof_id;
	if (prof_idx >= ipolicer->band_prof.max ||
	    ipolicer->pfvf_map[prof_idx] != pcifunc)
		return -EINVAL;

	return 0;
}
5348e8e095b3SSunil Goutham
rvu_mbox_handler_nix_bandprof_alloc(struct rvu * rvu,struct nix_bandprof_alloc_req * req,struct nix_bandprof_alloc_rsp * rsp)5349e8e095b3SSunil Goutham int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
5350e8e095b3SSunil Goutham struct nix_bandprof_alloc_req *req,
5351e8e095b3SSunil Goutham struct nix_bandprof_alloc_rsp *rsp)
5352e8e095b3SSunil Goutham {
5353e8e095b3SSunil Goutham int blkaddr, layer, prof, idx, err;
5354e8e095b3SSunil Goutham u16 pcifunc = req->hdr.pcifunc;
5355e8e095b3SSunil Goutham struct nix_ipolicer *ipolicer;
5356e8e095b3SSunil Goutham struct nix_hw *nix_hw;
5357e8e095b3SSunil Goutham
5358e8e095b3SSunil Goutham if (!rvu->hw->cap.ipolicer)
5359e8e095b3SSunil Goutham return NIX_AF_ERR_IPOLICER_NOTSUPP;
5360e8e095b3SSunil Goutham
5361e8e095b3SSunil Goutham err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5362e8e095b3SSunil Goutham if (err)
5363e8e095b3SSunil Goutham return err;
5364e8e095b3SSunil Goutham
5365e8e095b3SSunil Goutham mutex_lock(&rvu->rsrc_lock);
5366e8e095b3SSunil Goutham for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5367e8e095b3SSunil Goutham if (layer == BAND_PROF_INVAL_LAYER)
5368e8e095b3SSunil Goutham continue;
5369e8e095b3SSunil Goutham if (!req->prof_count[layer])
5370e8e095b3SSunil Goutham continue;
5371e8e095b3SSunil Goutham
5372e8e095b3SSunil Goutham ipolicer = &nix_hw->ipolicer[layer];
5373e8e095b3SSunil Goutham for (idx = 0; idx < req->prof_count[layer]; idx++) {
5374e8e095b3SSunil Goutham /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
5375e8e095b3SSunil Goutham if (idx == MAX_BANDPROF_PER_PFFUNC)
5376e8e095b3SSunil Goutham break;
5377e8e095b3SSunil Goutham
5378e8e095b3SSunil Goutham prof = rvu_alloc_rsrc(&ipolicer->band_prof);
5379e8e095b3SSunil Goutham if (prof < 0)
5380e8e095b3SSunil Goutham break;
5381e8e095b3SSunil Goutham rsp->prof_count[layer]++;
5382e8e095b3SSunil Goutham rsp->prof_idx[layer][idx] = prof;
5383e8e095b3SSunil Goutham ipolicer->pfvf_map[prof] = pcifunc;
5384e8e095b3SSunil Goutham }
5385e8e095b3SSunil Goutham }
5386e8e095b3SSunil Goutham mutex_unlock(&rvu->rsrc_lock);
5387e8e095b3SSunil Goutham return 0;
5388e8e095b3SSunil Goutham }
5389e8e095b3SSunil Goutham
/* Release every bandwidth profile owned by @pcifunc across all layers.
 * Leaf profiles marked for ratelimit aggregation get their mid layer
 * linkage torn down first. Returns 0, or an error when policers are
 * unsupported or the NIX block lookup fails.
 */
static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
{
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;
	int blkaddr, layer, prof, rc;

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (rc)
		return rc;

	mutex_lock(&rvu->rsrc_lock);
	/* Walk every layer and free profiles owned by this PCIFUNC */
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;

		ipolicer = &nix_hw->ipolicer[layer];
		for (prof = 0; prof < ipolicer->band_prof.max; prof++) {
			if (ipolicer->pfvf_map[prof] != pcifunc)
				continue;

			/* Clear ratelimit aggregation, if any */
			if (layer == BAND_PROF_LEAF_LAYER &&
			    ipolicer->match_id[prof])
				nix_clear_ratelimit_aggr(rvu, nix_hw, prof);

			/* Hand ownership back to AF and return the
			 * profile to the free pool.
			 */
			ipolicer->pfvf_map[prof] = 0x00;
			ipolicer->match_id[prof] = 0;
			rvu_free_rsrc(&ipolicer->band_prof, prof);
		}
	}
	mutex_unlock(&rvu->rsrc_lock);

	return 0;
}
5427e8e095b3SSunil Goutham
rvu_mbox_handler_nix_bandprof_free(struct rvu * rvu,struct nix_bandprof_free_req * req,struct msg_rsp * rsp)5428e8e095b3SSunil Goutham int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
5429e8e095b3SSunil Goutham struct nix_bandprof_free_req *req,
5430e8e095b3SSunil Goutham struct msg_rsp *rsp)
5431e8e095b3SSunil Goutham {
5432e8e095b3SSunil Goutham int blkaddr, layer, prof_idx, idx, err;
5433e8e095b3SSunil Goutham u16 pcifunc = req->hdr.pcifunc;
5434e8e095b3SSunil Goutham struct nix_ipolicer *ipolicer;
5435e8e095b3SSunil Goutham struct nix_hw *nix_hw;
5436e8e095b3SSunil Goutham
5437e8e095b3SSunil Goutham if (req->free_all)
5438e8e095b3SSunil Goutham return nix_free_all_bandprof(rvu, pcifunc);
5439e8e095b3SSunil Goutham
5440e8e095b3SSunil Goutham if (!rvu->hw->cap.ipolicer)
5441e8e095b3SSunil Goutham return NIX_AF_ERR_IPOLICER_NOTSUPP;
5442e8e095b3SSunil Goutham
5443e8e095b3SSunil Goutham err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5444e8e095b3SSunil Goutham if (err)
5445e8e095b3SSunil Goutham return err;
5446e8e095b3SSunil Goutham
5447e8e095b3SSunil Goutham mutex_lock(&rvu->rsrc_lock);
5448e8e095b3SSunil Goutham /* Free the requested profile indices */
5449e8e095b3SSunil Goutham for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5450e8e095b3SSunil Goutham if (layer == BAND_PROF_INVAL_LAYER)
5451e8e095b3SSunil Goutham continue;
5452e8e095b3SSunil Goutham if (!req->prof_count[layer])
5453e8e095b3SSunil Goutham continue;
5454e8e095b3SSunil Goutham
5455e8e095b3SSunil Goutham ipolicer = &nix_hw->ipolicer[layer];
5456e8e095b3SSunil Goutham for (idx = 0; idx < req->prof_count[layer]; idx++) {
5457f8e2ec79SElena Salomatkina if (idx == MAX_BANDPROF_PER_PFFUNC)
5458f8e2ec79SElena Salomatkina break;
5459e8e095b3SSunil Goutham prof_idx = req->prof_idx[layer][idx];
5460e8e095b3SSunil Goutham if (prof_idx >= ipolicer->band_prof.max ||
5461e8e095b3SSunil Goutham ipolicer->pfvf_map[prof_idx] != pcifunc)
5462e8e095b3SSunil Goutham continue;
5463e8e095b3SSunil Goutham
5464e8e095b3SSunil Goutham /* Clear ratelimit aggregation, if any */
5465e8e095b3SSunil Goutham if (layer == BAND_PROF_LEAF_LAYER &&
5466e8e095b3SSunil Goutham ipolicer->match_id[prof_idx])
5467e8e095b3SSunil Goutham nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
5468e8e095b3SSunil Goutham
5469e8e095b3SSunil Goutham ipolicer->pfvf_map[prof_idx] = 0x00;
5470e8e095b3SSunil Goutham ipolicer->match_id[prof_idx] = 0;
5471e8e095b3SSunil Goutham rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
5472e8e095b3SSunil Goutham }
5473e8e095b3SSunil Goutham }
5474e8e095b3SSunil Goutham mutex_unlock(&rvu->rsrc_lock);
5475e8e095b3SSunil Goutham return 0;
5476e8e095b3SSunil Goutham }
5477e8e095b3SSunil Goutham
/* Issue an AQ READ for context @qidx of type @ctype on behalf of
 * @pcifunc. @aq_req is zeroed and used as scratch; the context lands
 * in @aq_rsp. Returns the AQ enqueue status.
 */
int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
			struct nix_cn10k_aq_enq_req *aq_req,
			struct nix_cn10k_aq_enq_rsp *aq_rsp,
			u16 pcifunc, u8 ctype, u32 qidx)
{
	memset(aq_req, 0, sizeof(*aq_req));

	aq_req->hdr.pcifunc = pcifunc;
	aq_req->op = NIX_AQ_INSTOP_READ;
	aq_req->ctype = ctype;
	aq_req->qidx = qidx;

	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				       (struct nix_aq_enq_req *)aq_req,
				       (struct nix_aq_enq_rsp *)aq_rsp);
}
5493e8e095b3SSunil Goutham
/* Link leaf profile @leaf_prof to mid layer profile @mid_prof.
 *
 * Builds a WRITE AQ op (issued as AF) that updates only the
 * band_prof_id and hl_en fields of the leaf context, leaving the
 * rest of the profile untouched via the write mask.
 */
static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
					  struct nix_hw *nix_hw,
					  struct nix_cn10k_aq_enq_req *aq_req,
					  struct nix_cn10k_aq_enq_rsp *aq_rsp,
					  u32 leaf_prof, u16 mid_prof)
{
	memset(aq_req, 0, sizeof(*aq_req));

	/* AF (pcifunc 0x00) rewrites the leaf profile context */
	aq_req->hdr.pcifunc = 0x00;
	aq_req->op = NIX_AQ_INSTOP_WRITE;
	aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req->qidx = leaf_prof;

	/* Touch only the higher-layer link fields */
	aq_req->prof.band_prof_id = mid_prof;
	aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
	aq_req->prof.hl_en = 1;
	aq_req->prof_mask.hl_en = 1;

	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				       (struct nix_aq_enq_req *)aq_req,
				       (struct nix_aq_enq_rsp *)aq_rsp);
}
5515e8e095b3SSunil Goutham
/* Aggregate rate limiting for RQs whose flows share the same @match_id.
 *
 * Records @match_id against the leaf bandwidth profile attached to
 * @rq_idx. If another leaf profile already carries the same match_id,
 * both leaves are linked under a common mid layer profile so their
 * combined traffic is policed as one aggregate: either reuse the mid
 * profile the matching leaf is already linked to, or allocate a fresh
 * mid profile, seed it from the leaf's context, and link both leaves
 * to it.
 *
 * Returns 0 when policers are unsupported, policing is disabled on the
 * RQ, or no matching leaf exists; otherwise the AQ/lookup status.
 */
int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
				 u16 rq_idx, u16 match_id)
{
	int leaf_prof, mid_prof, leaf_match;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;
	int blkaddr, idx, rc;

	if (!rvu->hw->cap.ipolicer)
		return 0;

	rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (rc)
		return rc;

	/* Fetch the RQ's context to see if policing is enabled */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
				 NIX_AQ_CTYPE_RQ, rq_idx);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
			__func__, rq_idx, pcifunc);
		return rc;
	}

	if (!aq_rsp.rq.policer_ena)
		return 0;

	/* Get the bandwidth profile ID mapped to this RQ */
	leaf_prof = aq_rsp.rq.band_prof_id;

	ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
	ipolicer->match_id[leaf_prof] = match_id;

	/* Check if any other leaf profile is marked with same match_id */
	for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
		if (idx == leaf_prof)
			continue;
		if (ipolicer->match_id[idx] != match_id)
			continue;

		leaf_match = idx;
		break;
	}

	/* No peer leaf shares this match_id yet; nothing to aggregate.
	 * (leaf_match is read below only when the loop broke early, so
	 * it is always initialized at its uses.)
	 */
	if (idx == ipolicer->band_prof.max)
		return 0;

	/* Fetch the matching profile's context to check if it's already
	 * mapped to a mid level profile.
	 */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_match);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_match);
		return rc;
	}

	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
	if (aq_rsp.prof.hl_en) {
		/* Get Mid layer prof index and map leaf_prof index
		 * also such that flows that are being steered
		 * to different RQs and marked with same match_id
		 * are rate limited in a aggregate fashion
		 */
		mid_prof = aq_rsp.prof.band_prof_id;
		rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
						    &aq_req, &aq_rsp,
						    leaf_prof, mid_prof);
		if (rc) {
			dev_err(rvu->dev,
				"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
				__func__, leaf_prof, mid_prof);
			goto exit;
		}

		mutex_lock(&rvu->rsrc_lock);
		ipolicer->ref_count[mid_prof]++;
		mutex_unlock(&rvu->rsrc_lock);
		goto exit;
	}

	/* Allocate a mid layer profile and
	 * map both 'leaf_prof' and 'leaf_match' profiles to it.
	 */
	mutex_lock(&rvu->rsrc_lock);
	mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
	if (mid_prof < 0) {
		dev_err(rvu->dev,
			"%s: Unable to allocate mid layer profile\n", __func__);
		mutex_unlock(&rvu->rsrc_lock);
		/* NOTE(review): rc is still 0 here, so allocation failure
		 * is reported as success to the caller — presumably
		 * deliberate best-effort aggregation; confirm upstream.
		 */
		goto exit;
	}
	mutex_unlock(&rvu->rsrc_lock);
	/* New mid profile is AF-owned with no leaves referencing it yet */
	ipolicer->pfvf_map[mid_prof] = 0x00;
	ipolicer->ref_count[mid_prof] = 0;

	/* Initialize mid layer profile same as 'leaf_prof' */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		goto exit;
	}

	/* Copy the leaf's context into the new mid profile; the all-ones
	 * mask makes the WRITE update every field.
	 */
	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req.hdr.pcifunc = 0x00;
	aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req.op = NIX_AQ_INSTOP_WRITE;
	memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
	memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
	/* Clear higher layer enable bit in the mid profile, just in case */
	aq_req.prof.hl_en = 0;
	aq_req.prof_mask.hl_en = 1;

	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				     (struct nix_aq_enq_req *)&aq_req, NULL);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to INIT context of mid layer profile %d\n",
			__func__, mid_prof);
		goto exit;
	}

	/* Map both leaf profiles to this mid layer profile */
	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_prof, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_prof, mid_prof);
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_match, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_match, mid_prof);
		/* First leaf was linked; drop its ref on failure */
		ipolicer->ref_count[mid_prof]--;
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

exit:
	return rc;
}
5680e8e095b3SSunil Goutham
/* Called with mutex rsrc_lock */
/* Drop @leaf_prof's reference on its mid layer profile and return the
 * mid profile to the free pool once its ref_count hits zero.
 *
 * NOTE: the caller-held rsrc_lock is temporarily RELEASED around the
 * AQ context read and re-taken afterwards — presumably because the AQ
 * enqueue must not run under rsrc_lock; confirm before relying on
 * state staying unchanged across this call.
 */
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
				     u32 leaf_prof)
{
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct nix_ipolicer *ipolicer;
	u16 mid_prof;
	int rc;

	mutex_unlock(&rvu->rsrc_lock);

	/* Fetch the leaf profile context to find its mid layer link */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);

	mutex_lock(&rvu->rsrc_lock);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		return;
	}

	/* Leaf is not linked to any higher layer profile; nothing to do */
	if (!aq_rsp.prof.hl_en)
		return;

	mid_prof = aq_rsp.prof.band_prof_id;
	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
	ipolicer->ref_count[mid_prof]--;
	/* If ref_count is zero, free mid layer profile */
	if (!ipolicer->ref_count[mid_prof]) {
		ipolicer->pfvf_map[mid_prof] = 0x00;
		rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
	}
}
571666c312eaSSunil Goutham
rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu * rvu,struct msg_req * req,struct nix_bandprof_get_hwinfo_rsp * rsp)571766c312eaSSunil Goutham int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req,
571866c312eaSSunil Goutham struct nix_bandprof_get_hwinfo_rsp *rsp)
571966c312eaSSunil Goutham {
572066c312eaSSunil Goutham struct nix_ipolicer *ipolicer;
572166c312eaSSunil Goutham int blkaddr, layer, err;
572266c312eaSSunil Goutham struct nix_hw *nix_hw;
572366c312eaSSunil Goutham u64 tu;
572466c312eaSSunil Goutham
572566c312eaSSunil Goutham if (!rvu->hw->cap.ipolicer)
572666c312eaSSunil Goutham return NIX_AF_ERR_IPOLICER_NOTSUPP;
572766c312eaSSunil Goutham
572866c312eaSSunil Goutham err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
572966c312eaSSunil Goutham if (err)
573066c312eaSSunil Goutham return err;
573166c312eaSSunil Goutham
573266c312eaSSunil Goutham /* Return number of bandwidth profiles free at each layer */
573366c312eaSSunil Goutham mutex_lock(&rvu->rsrc_lock);
573466c312eaSSunil Goutham for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
573566c312eaSSunil Goutham if (layer == BAND_PROF_INVAL_LAYER)
573666c312eaSSunil Goutham continue;
573766c312eaSSunil Goutham
573866c312eaSSunil Goutham ipolicer = &nix_hw->ipolicer[layer];
573966c312eaSSunil Goutham rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof);
574066c312eaSSunil Goutham }
574166c312eaSSunil Goutham mutex_unlock(&rvu->rsrc_lock);
574266c312eaSSunil Goutham
574366c312eaSSunil Goutham /* Set the policer timeunit in nanosec */
574466c312eaSSunil Goutham tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0);
574566c312eaSSunil Goutham rsp->policer_timeunit = (tu + 1) * 100;
574666c312eaSSunil Goutham
574766c312eaSSunil Goutham return 0;
574866c312eaSSunil Goutham }
5749