// SPDX-License-Identifier: GPL-2.0
/* Marvell RPM CN10K driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <linux/bitfield.h>
#include <linux/pci.h>
#include "rvu.h"
#include "cgx.h"
#include "rvu_reg.h"

/* RVU LMTST */
#define LMT_TBL_OP_READ		0
#define LMT_TBL_OP_WRITE	1
#define LMT_MAP_TABLE_SIZE	(128 * 1024)
#define LMT_MAPTBL_ENTRY_SIZE	16
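
/* Layout sketch derived from the constants above (a reading aid, not a
 * substitute for the HRM): the map table is 128 KB of 16-byte entries,
 * i.e. 128 * 1024 / 16 = 8192 entries. Word0 (offset 0) of an entry holds
 * the LMTLINE base address; word1 (offset 8, LMT_MAP_TBL_W1_OFF below)
 * holds the control bits updated by the lmtst_tbl_setup mailbox.
 */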

/* Function to perform operations (read/write) on lmtst map table */
static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
			       int lmt_tbl_op)
{
	void __iomem *lmt_map_base;
	u64 tbl_base;

	tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);

	lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
	if (!lmt_map_base) {
		dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
		return -ENOMEM;
	}

	if (lmt_tbl_op == LMT_TBL_OP_READ) {
		*val = readq(lmt_map_base + index);
	} else {
		writeq(*val, lmt_map_base + index);
		/* Flush the AP interceptor cache to make APR_LMT_MAP_ENTRY_S
		 * changes effective. Writing 1 triggers the flush; the read
		 * back acts as a barrier and sets up a data dependency.
		 * Writing 0 after the write of 1 completes the flush.
		 */
		rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0));
		rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL);
		rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00);
	}

	iounmap(lmt_map_base);
	return 0;
}

#define LMT_MAP_TBL_W1_OFF  8
static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
{
	return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
		(pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
}
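
/* For illustration (values are hypothetical): with hw->total_vfs = 128, a
 * pcifunc with PF number 1 and FUNC field 3 maps to byte offset
 * ((1 * 128) + 3) * 16 = 0x830 into the table, and its word1 sits at
 * 0x830 + LMT_MAP_TBL_W1_OFF = 0x838.
 */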

static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
			   u64 iova, u64 *lmt_addr)
{
	u64 pa, val, pf;
	int err = 0;

	if (!iova) {
		dev_err(rvu->dev, "%s Requested NULL address for translation\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&rvu->rsrc_lock);
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
	pf = rvu_get_pf(pcifunc) & 0x1F;
	val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
	      ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);

	err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false);
	if (err) {
		dev_err(rvu->dev, "%s LMTLINE iova translation failed\n", __func__);
		goto exit;
	}
	val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS);
	if (val & ~0x1ULL) {
		dev_err(rvu->dev, "%s LMTLINE iova translation failed err:%llx\n", __func__, val);
		err = -EIO;
		goto exit;
	}
	/* PA[51:12] = RVU_AF_SMMU_TLN_FLIT0[57:18]
	 * PA[11:0] = IOVA[11:0]
	 */
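	/* Worked example (hypothetical register value): if FLIT0 reads
	 * 0x0000004000040000, then pa = (FLIT0 >> 18) & GENMASK(39, 0)
	 * = 0x100001, and for an iova whose low 12 bits are 0xabc the
	 * result is (0x100001 << 12) | 0xabc = 0x100001abc.
	 */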
	pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT0) >> 18;
	pa &= GENMASK_ULL(39, 0);
	*lmt_addr = (pa << 12) | (iova & 0xFFF);
exit:
	mutex_unlock(&rvu->rsrc_lock);
	return err;
}

static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	u32 tbl_idx;
	int err = 0;
	u64 val;

	/* Read the current lmt addr of pcifunc */
	tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
	err = lmtst_map_table_ops(rvu, tbl_idx, &val, LMT_TBL_OP_READ);
	if (err) {
		dev_err(rvu->dev,
			"Failed to read LMT map table: index 0x%x err %d\n",
			tbl_idx, err);
		return err;
	}

	/* Store the secondary's lmt base address as this needs to be
	 * reverted in FLR. Also make sure this default value doesn't
	 * get overwritten on multiple calls to this mailbox.
	 */
	if (!pfvf->lmt_base_addr)
		pfvf->lmt_base_addr = val;

	/* Update the LMT table with the new addr */
	err = lmtst_map_table_ops(rvu, tbl_idx, &lmt_addr, LMT_TBL_OP_WRITE);
	if (err) {
		dev_err(rvu->dev,
			"Failed to update LMT map table: index 0x%x err %d\n",
			tbl_idx, err);
		return err;
	}
	return 0;
}

int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu,
				     struct lmtst_tbl_setup_req *req,
				     struct msg_rsp *rsp)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	u32 pri_tbl_idx, tbl_idx;
	u64 lmt_addr;
	int err = 0;
	u64 val;

	/* Check if PF_FUNC wants to use its own local memory as the LMTLINE
	 * region; if so, convert that IOVA to a physical address and
	 * populate the LMT table with that address.
	 */
	if (req->use_local_lmt_region) {
		err = rvu_get_lmtaddr(rvu, req->hdr.pcifunc,
				      req->lmt_iova, &lmt_addr);
		if (err < 0)
			return err;

		/* Update the lmt addr for this PF_FUNC in the LMT table */
		err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, lmt_addr);
		if (err)
			return err;
	}

	/* Reconfigure the lmtst map table in LMT region shared mode, i.e.
	 * make multiple PF_FUNCs share an LMTLINE region. The primary/base
	 * pcifunc (passed as an argument to the mailbox) is the one whose
	 * lmt base address will be shared with the secondary pcifunc (the
	 * one calling this mailbox).
	 */
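	/* For example: if a secondary VF sends this mailbox with
	 * req->base_pcifunc pointing at its PF, the VF's map table entry
	 * is rewritten below with the PF's LMTLINE base address, so both
	 * issue LMTSTs through the same region until FLR reverts it.
	 */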
	if (req->base_pcifunc) {
		/* Calculate the LMT table index equivalent to the primary
		 * pcifunc.
		 */
		pri_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->base_pcifunc);

		/* Read the base lmt addr of the primary pcifunc */
		err = lmtst_map_table_ops(rvu, pri_tbl_idx, &val,
					  LMT_TBL_OP_READ);
		if (err) {
			dev_err(rvu->dev,
				"Failed to read LMT map table: index 0x%x err %d\n",
				pri_tbl_idx, err);
			goto error;
		}

		/* Update the base lmt addr of the secondary with the
		 * primary's base lmt addr.
		 */
		err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, val);
		if (err)
			return err;
	}

	/* This mailbox can also be used to update word1 of APR_LMT_MAP_ENTRY_S,
	 * such as enabling scheduled LMTST, disabling LMTLINE prefetch, and
	 * disabling early completion for ordered LMTST.
	 */
	if (req->sch_ena || req->dis_sched_early_comp || req->dis_line_pref) {
		tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->hdr.pcifunc);
		err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF,
					  &val, LMT_TBL_OP_READ);
		if (err) {
			dev_err(rvu->dev,
				"Failed to read LMT map table: index 0x%x err %d\n",
				tbl_idx + LMT_MAP_TBL_W1_OFF, err);
			goto error;
		}

		/* Store the lmt map table entry word1 default value as this
		 * needs to be reverted in FLR. Also make sure this default
		 * value doesn't get overwritten on multiple calls to this
		 * mailbox.
		 */
		if (!pfvf->lmt_map_ent_w1)
			pfvf->lmt_map_ent_w1 = val;

		/* Disable early completion for ordered LMTSTs. */
		if (req->dis_sched_early_comp)
			val |= (req->dis_sched_early_comp <<
				APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT);
		/* Enable scheduled LMTST */
		if (req->sch_ena)
			val |= (req->sch_ena << APR_LMT_MAP_ENT_SCH_ENA_SHIFT) |
				req->ssow_pf_func;
		/* Disable LMTLINE prefetch before receiving store data. */
		if (req->dis_line_pref)
			val |= (req->dis_line_pref <<
				APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT);

		err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF,
					  &val, LMT_TBL_OP_WRITE);
		if (err) {
			dev_err(rvu->dev,
				"Failed to update LMT map table: index 0x%x err %d\n",
				tbl_idx + LMT_MAP_TBL_W1_OFF, err);
			goto error;
		}
	}

error:
	return err;
}

/* Reset the lmtst map table to the original base addresses */
void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	u32 tbl_idx;
	int err;

	if (is_rvu_otx2(rvu))
		return;

	if (pfvf->lmt_base_addr || pfvf->lmt_map_ent_w1) {
		/* This corresponds to the lmt map table index */
		tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
		/* Revert to the original lmt base addr of the respective
		 * pcifunc.
		 */
		if (pfvf->lmt_base_addr) {
			err = lmtst_map_table_ops(rvu, tbl_idx,
						  &pfvf->lmt_base_addr,
						  LMT_TBL_OP_WRITE);
			if (err)
				dev_err(rvu->dev,
					"Failed to update LMT map table: index 0x%x err %d\n",
					tbl_idx, err);
			pfvf->lmt_base_addr = 0;
		}
		/* Revert to the original word1 value of the lmtst map table
		 * entry which underwent changes.
		 */
		if (pfvf->lmt_map_ent_w1) {
			err = lmtst_map_table_ops(rvu,
						  tbl_idx + LMT_MAP_TBL_W1_OFF,
						  &pfvf->lmt_map_ent_w1,
						  LMT_TBL_OP_WRITE);
			if (err)
				dev_err(rvu->dev,
					"Failed to update LMT map table: index 0x%x err %d\n",
					tbl_idx + LMT_MAP_TBL_W1_OFF, err);
			pfvf->lmt_map_ent_w1 = 0;
		}
	}
}

int rvu_set_channels_base(struct rvu *rvu)
{
	u16 nr_lbk_chans, nr_sdp_chans, nr_cgx_chans, nr_cpt_chans;
	u16 sdp_chan_base, cgx_chan_base, cpt_chan_base;
	struct rvu_hwinfo *hw = rvu->hw;
	u64 nix_const, nix_const1;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return blkaddr;

	nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);

	hw->cgx = (nix_const >> 12) & 0xFULL;
	hw->lmac_per_cgx = (nix_const >> 8) & 0xFULL;
	hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
	hw->lbk_links = (nix_const >> 24) & 0xFULL;
	hw->cpt_links = (nix_const >> 44) & 0xFULL;
	hw->sdp_links = 1;
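
	/* Field map as decoded above and further below (a summary, not a
	 * substitute for the HRM): NIX_AF_CONST[7:0] channels per CGX LMAC,
	 * [11:8] LMACs per CGX, [15:12] CGX count, [23:16] channels per LBK
	 * link, [27:24] LBK links, [43:32] CPT channels, [47:44] CPT links,
	 * bit 60 programmable channels; NIX_AF_CONST1[11:0] SDP channels.
	 */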

	hw->cgx_chan_base = NIX_CHAN_CGX_LMAC_CHX(0, 0, 0);
	hw->lbk_chan_base = NIX_CHAN_LBK_CHX(0, 0);
	hw->sdp_chan_base = NIX_CHAN_SDP_CH_START;

	/* No programmable channels */
	if (!(nix_const & BIT_ULL(60)))
		return 0;

	hw->cap.programmable_chans = true;

	/* If programmable channels are present then configure
	 * channels such that all channel numbers are contiguous,
	 * leaving no holes. This way the new CPT channels can be
	 * accommodated. The order of channel numbers assigned is
	 * LBK, SDP, CGX and CPT. Also the base channel number
	 * of a block must be a multiple of the number of channels
	 * of the block.
	 */
	nr_lbk_chans = (nix_const >> 16) & 0xFFULL;
	nr_sdp_chans = nix_const1 & 0xFFFULL;
	nr_cgx_chans = nix_const & 0xFFULL;
	nr_cpt_chans = (nix_const >> 32) & 0xFFFULL;

	sdp_chan_base = hw->lbk_chan_base + hw->lbk_links * nr_lbk_chans;
	/* Round up base channel to a multiple of the number of channels */
	hw->sdp_chan_base = ALIGN(sdp_chan_base, nr_sdp_chans);

	cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * nr_sdp_chans;
	hw->cgx_chan_base = ALIGN(cgx_chan_base, nr_cgx_chans);

	cpt_chan_base = hw->cgx_chan_base + hw->cgx_links * nr_cgx_chans;
	hw->cpt_chan_base = ALIGN(cpt_chan_base, nr_cpt_chans);
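
	/* Worked example with hypothetical constants, assuming
	 * hw->lbk_chan_base is 0: one LBK link with 64 channels puts
	 * sdp_chan_base at 0 + 1 * 64 = 64; with 128 SDP channels,
	 * ALIGN(64, 128) rounds the SDP base up to 128, and the CGX base
	 * then starts at ALIGN(128 + 1 * 128, nr_cgx_chans).
	 */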

	/* Out of 4096 channels, start CPT from 2048 so
	 * that the MSB for CPT channels is always set.
	 */
	if (cpt_chan_base <= NIX_CHAN_CPT_CH_START) {
		hw->cpt_chan_base = NIX_CHAN_CPT_CH_START;
	} else {
		dev_err(rvu->dev,
			"CPT channels could not fit in the range 2048-4095\n");
		return -EINVAL;
	}

	return 0;
}

#define LBK_CONNECT_NIXX(a)		(0x0 + (a))

static void __rvu_lbk_set_chans(struct rvu *rvu, void __iomem *base,
				u64 offset, int lbkid, u16 chans)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u64 cfg;

	cfg = readq(base + offset);
	cfg &= ~(LBK_LINK_CFG_RANGE_MASK |
		 LBK_LINK_CFG_ID_MASK | LBK_LINK_CFG_BASE_MASK);
	cfg |= FIELD_PREP(LBK_LINK_CFG_RANGE_MASK, ilog2(chans));
	cfg |= FIELD_PREP(LBK_LINK_CFG_ID_MASK, lbkid);
	cfg |= FIELD_PREP(LBK_LINK_CFG_BASE_MASK, hw->lbk_chan_base);

	writeq(cfg, base + offset);
}

static void rvu_lbk_set_channels(struct rvu *rvu)
{
	struct pci_dev *pdev = NULL;
	void __iomem *base;
	u64 lbk_const;
	u8 src, dst;
	u16 chans;

	/* To loopback packets between multiple NIX blocks,
	 * multiple LBK blocks are needed. With two NIX blocks,
	 * four LBK blocks are needed and each LBK block's
	 * source and destination are as follows:
	 * LBK0 - source NIX0 and destination NIX0
	 * LBK1 - source NIX0 and destination NIX1
	 * LBK2 - source NIX1 and destination NIX0
	 * LBK3 - source NIX1 and destination NIX1
	 * As per the HRM, channel numbers should be programmed as:
	 * P2X and X2P of LBK0 the same
	 * P2X and X2P of LBK3 the same
	 * P2X of LBK1 and X2P of LBK2 the same
	 * P2X of LBK2 and X2P of LBK1 the same
	 */
	while (true) {
		pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
				      PCI_DEVID_OCTEONTX2_LBK, pdev);
		if (!pdev)
			return;

		base = pci_ioremap_bar(pdev, 0);
		if (!base)
			goto err_put;

		lbk_const = readq(base + LBK_CONST);
		chans = FIELD_GET(LBK_CONST_CHANS, lbk_const);
		dst = FIELD_GET(LBK_CONST_DST, lbk_const);
		src = FIELD_GET(LBK_CONST_SRC, lbk_const);

		if (src == dst) {
			if (src == LBK_CONNECT_NIXX(0)) { /* LBK0 */
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    0, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    0, chans);
			} else if (src == LBK_CONNECT_NIXX(1)) { /* LBK3 */
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    1, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    1, chans);
			}
		} else {
			if (src == LBK_CONNECT_NIXX(0)) { /* LBK1 */
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    0, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    1, chans);
			} else if (src == LBK_CONNECT_NIXX(1)) { /* LBK2 */
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    1, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    0, chans);
			}
		}
		iounmap(base);
	}
err_put:
	pci_dev_put(pdev);
}

static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr)
{
	u64 nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	u64 nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	u16 cgx_chans, lbk_chans, sdp_chans, cpt_chans;
	struct rvu_hwinfo *hw = rvu->hw;
	int link, nix_link = 0;
	u16 start;
	u64 cfg;

	cgx_chans = nix_const & 0xFFULL;
	lbk_chans = (nix_const >> 16) & 0xFFULL;
	sdp_chans = nix_const1 & 0xFFFULL;
	cpt_chans = (nix_const >> 32) & 0xFFFULL;

	start = hw->cgx_chan_base;
	for (link = 0; link < hw->cgx_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cgx_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += cgx_chans;
	}

	start = hw->lbk_chan_base;
	for (link = 0; link < hw->lbk_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(lbk_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += lbk_chans;
	}

	start = hw->sdp_chan_base;
	for (link = 0; link < hw->sdp_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(sdp_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += sdp_chans;
	}

	start = hw->cpt_chan_base;
	for (link = 0; link < hw->cpt_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cpt_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += cpt_chans;
	}
}

static void rvu_nix_set_channels(struct rvu *rvu)
{
	int blkaddr = 0;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		__rvu_nix_set_channels(rvu, blkaddr);
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
}

static void __rvu_rpm_set_channels(int cgxid, int lmacid, u16 base)
{
	u64 cfg;

	cfg = cgx_lmac_read(cgxid, lmacid, RPMX_CMRX_LINK_CFG);
	cfg &= ~(RPMX_CMRX_LINK_BASE_MASK | RPMX_CMRX_LINK_RANGE_MASK);

	/* There is no read-only constant register from which to read
	 * the number of channels per LMAC; it is always 16.
	 */
	cfg |= FIELD_PREP(RPMX_CMRX_LINK_RANGE_MASK, ilog2(16));
	cfg |= FIELD_PREP(RPMX_CMRX_LINK_BASE_MASK, base);
	cgx_lmac_write(cgxid, lmacid, RPMX_CMRX_LINK_CFG, cfg);
}
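
/* Channel numbering sketch (illustrative): each LMAC claims a fixed block
 * of 16 channels, so the loop below gives LMAC 'l' of CGX 'c' the base
 * hw->cgx_chan_base + 16 * (c * hw->lmac_per_cgx + l).
 */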
static void rvu_rpm_set_channels(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 base = hw->cgx_chan_base;
	int cgx, lmac;

	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
		for (lmac = 0; lmac < hw->lmac_per_cgx; lmac++) {
			__rvu_rpm_set_channels(cgx, lmac, base);
			base += 16;
		}
	}
}

void rvu_program_channels(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	if (!hw->cap.programmable_chans)
		return;

	rvu_nix_set_channels(rvu);
	rvu_lbk_set_channels(rvu);
	rvu_rpm_set_channels(rvu);
}

void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw)
{
	int blkaddr = nix_hw->blkaddr;
	u64 cfg;

	/* Set the AF vWQE timer interval to an LF-configurable range of
	 * 6.4us to 1.632ms.
	 */
	rvu_write64(rvu, blkaddr, NIX_AF_VWQE_TIMER, 0x3FULL);

	/* Enable the NIX RX stream and global conditional clock to
	 * avoid multiple frees of NPA buffers.
	 */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CFG);
	cfg |= BIT_ULL(1) | BIT_ULL(2);
	rvu_write64(rvu, blkaddr, NIX_AF_CFG, cfg);
}