xref: /openbmc/linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c (revision 4d75f5c664195b970e1cd2fd25b65b5eff257a0a)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RPM CN10K driver
3  *
4  * Copyright (C) 2020 Marvell.
5  */
6 
7 #include <linux/bitfield.h>
8 #include <linux/pci.h>
9 #include "rvu.h"
10 #include "cgx.h"
11 #include "rvu_reg.h"
12 
13 /* RVU LMTST */
14 #define LMT_TBL_OP_READ		0
15 #define LMT_TBL_OP_WRITE	1
16 #define LMT_MAPTBL_ENTRY_SIZE	16
17 #define LMT_MAX_VFS		256
18 
19 #define LMT_MAP_ENTRY_ENA      BIT_ULL(20)
20 #define LMT_MAP_ENTRY_LINES    GENMASK_ULL(18, 16)
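/* For reference, the write path below composes APR_LMT_MAP_ENTRY_S word1 as:
 *   FIELD_PREP(LMT_MAP_ENTRY_ENA, 0x1)   -> 0x100000 (bit 20 set)
 *   FIELD_PREP(LMT_MAP_ENTRY_LINES, 0x6) -> 0x060000 (0x6 in bits 18:16)
 * i.e. cfg = 0x160000: entry enabled, with the 2048-LMTLINE encoding.
 */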
21 
22 /* Function to perform operations (read/write) on lmtst map table */
23 static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
24 			       int lmt_tbl_op)
25 {
26 	void __iomem *lmt_map_base;
27 	u64 tbl_base, cfg;
28 	int pfs, vfs;
29 
30 	tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
31 	cfg  = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
32 	vfs = 1 << (cfg & 0xF);
33 	pfs = 1 << ((cfg >> 4) & 0x7);
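	/* Example decode (hypothetical CFG value): if APR_AF_LMT_CFG reads
	 * 0x48, then vfs = 1 << 8 = 256 and pfs = 1 << 4 = 16, so the table
	 * mapped below spans 16 * 256 * 16 = 64 KB.
	 */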
34 
35 	lmt_map_base = ioremap_wc(tbl_base, pfs * vfs * LMT_MAPTBL_ENTRY_SIZE);
36 	if (!lmt_map_base) {
37 		dev_err(rvu->dev, "Failed to set up lmt map table mapping!\n");
38 		return -ENOMEM;
39 	}
40 
41 	if (lmt_tbl_op == LMT_TBL_OP_READ) {
42 		*val = readq(lmt_map_base + index);
43 	} else {
44 		writeq((*val), (lmt_map_base + index));
45 
46 		cfg = FIELD_PREP(LMT_MAP_ENTRY_ENA, 0x1);
47 		/* 2048 LMTLINES */
48 		cfg |= FIELD_PREP(LMT_MAP_ENTRY_LINES, 0x6);
49 
50 		writeq(cfg, (lmt_map_base + (index + 8)));
51 
52 		/* Flush the AP interceptor cache so the APR_LMT_MAP_ENTRY_S
53 		 * changes take effect. Write 1 to trigger the flush; the read
54 		 * back acts as a barrier and sets up a data dependency; then
55 		 * write 0 to complete the flush.
56 		 */
57 		rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0));
58 		rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL);
59 		rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00);
60 	}
61 
62 	iounmap(lmt_map_base);
63 	return 0;
64 }
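/* As used above, each 16-byte APR_LMT_MAP_ENTRY_S is accessed as two 64-bit
 * words: word0 at byte offset 0 holds the LMT base address, and word1 at
 * byte offset 8 (LMT_MAP_TBL_W1_OFF below) holds the control fields such as
 * ENA and LINES.
 */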
65 
66 #define LMT_MAP_TBL_W1_OFF  8
67 static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
68 {
69 	return ((rvu_get_pf(pcifunc) * LMT_MAX_VFS) +
70 		(pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
71 }
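/* Worked example (hypothetical pcifunc): for PF 2 with FUNC 3 the index is
 * ((2 * LMT_MAX_VFS) + 3) * LMT_MAPTBL_ENTRY_SIZE = (512 + 3) * 16 = 8240,
 * i.e. the byte offset of that function's APR_LMT_MAP_ENTRY_S in the table.
 */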
72 
73 static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
74 			   u64 iova, u64 *lmt_addr)
75 {
76 	u64 pa, val, pf;
77 	int err = 0;
78 
79 	if (!iova) {
80 		dev_err(rvu->dev, "%s Requested Null address for translation\n", __func__);
81 		return -EINVAL;
82 	}
83 
84 	mutex_lock(&rvu->rsrc_lock);
85 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
86 	pf = rvu_get_pf(pcifunc) & RVU_PFVF_PF_MASK;
87 	val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
88 	      ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
89 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);
90 
91 	err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false);
92 	if (err) {
93 		dev_err(rvu->dev, "%s LMTLINE iova translation failed\n", __func__);
94 		goto exit;
95 	}
96 	val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS);
97 	if (val & ~0x1ULL) {
98 		dev_err(rvu->dev, "%s LMTLINE iova translation failed err:%llx\n", __func__, val);
99 		err = -EIO;
100 		goto exit;
101 	}
102 	/* PA[51:12] = RVU_AF_SMMU_TLN_FLIT0[57:18]
103 	 * PA[11:0] = IOVA[11:0]
104 	 */
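	/* Worked example (hypothetical register values): if
	 * RVU_AF_SMMU_TLN_FLIT0[57:18] reads 0x123456 and IOVA[11:0] is
	 * 0xabc, then pa = 0x123456 and *lmt_addr = 0x123456abc.
	 */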
105 	pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT0) >> 18;
106 	pa &= GENMASK_ULL(39, 0);
107 	*lmt_addr = (pa << 12) | (iova  & 0xFFF);
108 exit:
109 	mutex_unlock(&rvu->rsrc_lock);
110 	return err;
111 }
112 
113 static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr)
114 {
115 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
116 	u32 tbl_idx;
117 	int err = 0;
118 	u64 val;
119 
120 	/* Read the current lmt addr of pcifunc */
121 	tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
122 	err = lmtst_map_table_ops(rvu, tbl_idx, &val, LMT_TBL_OP_READ);
123 	if (err) {
124 		dev_err(rvu->dev,
125 			"Failed to read LMT map table: index 0x%x err %d\n",
126 			tbl_idx, err);
127 		return err;
128 	}
129 
130 	/* Store the secondary's lmt base address as it needs to be
131 	 * restored on FLR. Also make sure this default value doesn't
132 	 * get overwritten on multiple calls to this mailbox.
133 	 */
134 	if (!pfvf->lmt_base_addr)
135 		pfvf->lmt_base_addr = val;
136 
137 	/* Update the LMT table with new addr */
138 	err = lmtst_map_table_ops(rvu, tbl_idx, &lmt_addr, LMT_TBL_OP_WRITE);
139 	if (err) {
140 		dev_err(rvu->dev,
141 			"Failed to update LMT map table: index 0x%x err %d\n",
142 			tbl_idx, err);
143 		return err;
144 	}
145 	return 0;
146 }
147 
148 int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu,
149 				     struct lmtst_tbl_setup_req *req,
150 				     struct msg_rsp *rsp)
151 {
152 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
153 	u32 pri_tbl_idx, tbl_idx;
154 	u64 lmt_addr;
155 	int err = 0;
156 	u64 val;
157 
158 	/* Check if the PF_FUNC wants to use its own local memory as the
159 	 * LMTLINE region; if so, convert that IOVA to a physical address
160 	 * and populate the LMT table with that address.
161 	 */
162 	if (req->use_local_lmt_region) {
163 		err = rvu_get_lmtaddr(rvu, req->hdr.pcifunc,
164 				      req->lmt_iova, &lmt_addr);
165 		if (err < 0)
166 			return err;
167 
168 		/* Update the lmt addr for this PFFUNC in the LMT table */
169 		err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, lmt_addr);
170 		if (err)
171 			return err;
172 	}
173 
174 	/* Reconfigure the lmtst map table for LMT region shared mode, i.e.
175 	 * multiple PF_FUNCs share one LMTLINE region. The primary/base
176 	 * pcifunc (passed as an argument in the mailbox request) is the one
177 	 * whose lmt base address is shared with the secondary pcifunc
178 	 * (the one calling this mailbox).
179 	 */
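	/* Example (hypothetical functions): if PF1's VF0 sends this mailbox
	 * with base_pcifunc set to PF1, the word0 LMT base address read from
	 * PF1's map entry below is written into PF1:VF0's own entry, so both
	 * end up sharing PF1's LMTLINE region.
	 */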
180 	if (req->base_pcifunc) {
181 		/* Calculate the LMT table index corresponding to the primary
182 		 * pcifunc.
183 		 */
184 		pri_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->base_pcifunc);
185 
186 		/* Read the base lmt addr of the primary pcifunc */
187 		err = lmtst_map_table_ops(rvu, pri_tbl_idx, &val,
188 					  LMT_TBL_OP_READ);
189 		if (err) {
190 			dev_err(rvu->dev,
191 				"Failed to read LMT map table: index 0x%x err %d\n",
192 				pri_tbl_idx, err);
193 			goto error;
194 		}
195 
196 		/* Update the base lmt addr of secondary with primary's base
197 		 * lmt addr.
198 		 */
199 		err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, val);
200 		if (err)
201 			return err;
202 	}
203 
204 	/* This mailbox can also be used to update word1 of APR_LMT_MAP_ENTRY_S,
205 	 * e.g. enabling scheduled LMTST, disabling LMTLINE prefetch and
206 	 * disabling early completion for ordered LMTST.
207 	 */
208 	if (req->sch_ena || req->dis_sched_early_comp || req->dis_line_pref) {
209 		tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->hdr.pcifunc);
210 		err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF,
211 					  &val, LMT_TBL_OP_READ);
212 		if (err) {
213 			dev_err(rvu->dev,
214 				"Failed to read LMT map table: index 0x%x err %d\n",
215 				tbl_idx + LMT_MAP_TBL_W1_OFF, err);
216 			goto error;
217 		}
218 
219 		/* Store the lmt map table entry word1 default value as it needs
220 		 * to be restored on FLR. Also make sure this default value
221 		 * doesn't get overwritten on multiple calls to this mailbox.
222 		 */
223 		if (!pfvf->lmt_map_ent_w1)
224 			pfvf->lmt_map_ent_w1 = val;
225 
226 		/* Disable early completion for Ordered LMTSTs. */
227 		if (req->dis_sched_early_comp)
228 			val |= (req->dis_sched_early_comp <<
229 				APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT);
230 		/* Enable scheduled LMTST */
231 		if (req->sch_ena)
232 			val |= (req->sch_ena << APR_LMT_MAP_ENT_SCH_ENA_SHIFT) |
233 				req->ssow_pf_func;
234 		/* Disable LMTLINE prefetch before receiving store data. */
235 		if (req->dis_line_pref)
236 			val |= (req->dis_line_pref <<
237 				APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT);
238 
239 		err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF,
240 					  &val, LMT_TBL_OP_WRITE);
241 		if (err) {
242 			dev_err(rvu->dev,
243 				"Failed to update LMT map table: index 0x%x err %d\n",
244 				tbl_idx + LMT_MAP_TBL_W1_OFF, err);
245 			goto error;
246 		}
247 	}
248 
249 error:
250 	return err;
251 }
252 
253 /* Resetting the lmtst map table to original base addresses */
254 void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc)
255 {
256 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
257 	u32 tbl_idx;
258 	int err;
259 
260 	if (is_rvu_otx2(rvu))
261 		return;
262 
263 	if (pfvf->lmt_base_addr || pfvf->lmt_map_ent_w1) {
264 		/* This corresponds to lmt map table index */
265 		tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
266 		/* Restore the original lmt base addr for the respective
267 		 * pcifunc.
268 		 */
269 		if (pfvf->lmt_base_addr) {
270 			err = lmtst_map_table_ops(rvu, tbl_idx,
271 						  &pfvf->lmt_base_addr,
272 						  LMT_TBL_OP_WRITE);
273 			if (err)
274 				dev_err(rvu->dev,
275 					"Failed to update LMT map table: index 0x%x err %d\n",
276 					tbl_idx, err);
277 			pfvf->lmt_base_addr = 0;
278 		}
279 		/* Restore the original word1 value of the lmtst map table
280 		 * entry which was changed.
281 		 */
282 		if (pfvf->lmt_map_ent_w1) {
283 			err = lmtst_map_table_ops(rvu,
284 						  tbl_idx + LMT_MAP_TBL_W1_OFF,
285 						  &pfvf->lmt_map_ent_w1,
286 						  LMT_TBL_OP_WRITE);
287 			if (err)
288 				dev_err(rvu->dev,
289 					"Failed to update LMT map table: index 0x%x err %d\n",
290 					tbl_idx + LMT_MAP_TBL_W1_OFF, err);
291 			pfvf->lmt_map_ent_w1 = 0;
292 		}
293 	}
294 }
295 
296 int rvu_set_channels_base(struct rvu *rvu)
297 {
298 	u16 nr_lbk_chans, nr_sdp_chans, nr_cgx_chans, nr_cpt_chans;
299 	u16 sdp_chan_base, cgx_chan_base, cpt_chan_base;
300 	struct rvu_hwinfo *hw = rvu->hw;
301 	u64 nix_const, nix_const1;
302 	int blkaddr;
303 
304 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
305 	if (blkaddr < 0)
306 		return blkaddr;
307 
308 	nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
309 	nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
310 
311 	hw->cgx = (nix_const >> 12) & 0xFULL;
312 	hw->lmac_per_cgx = (nix_const >> 8) & 0xFULL;
313 	hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
314 	hw->lbk_links = (nix_const >> 24) & 0xFULL;
315 	hw->cpt_links = (nix_const >> 44) & 0xFULL;
316 	hw->sdp_links = 1;
317 
318 	hw->cgx_chan_base = NIX_CHAN_CGX_LMAC_CHX(0, 0, 0);
319 	hw->lbk_chan_base = NIX_CHAN_LBK_CHX(0, 0);
320 	hw->sdp_chan_base = NIX_CHAN_SDP_CH_START;
321 
322 	/* No Programmable channels */
323 	if (!(nix_const & BIT_ULL(60)))
324 		return 0;
325 
326 	hw->cap.programmable_chans = true;
327 
328 	/* If programmable channels are present then configure
329 	 * channels such that all channel numbers are contiguous,
330 	 * leaving no holes. This way the new CPT channels can be
331 	 * accommodated. The order of channel numbers assigned is
332 	 * LBK, SDP, CGX and CPT. Also the base channel number
333 	 * of a block must be a multiple of the number of channels
334 	 * of the block.
335 	 */
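	/* Worked example (hypothetical constants): with lbk_chan_base = 0,
	 * 2 LBK links x 64 channels, 1 SDP link x 128 channels and
	 * 12 CGX links x 16 channels:
	 *   sdp_chan_base = ALIGN(0 + 2 * 64, 128)   = 128
	 *   cgx_chan_base = ALIGN(128 + 1 * 128, 16) = 256
	 *   cpt_chan_base = 256 + 12 * 16            = 448
	 * and since 448 <= NIX_CHAN_CPT_CH_START, the CPT base is finally
	 * forced to 2048 below so the CPT channel MSB stays set.
	 */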
336 	nr_lbk_chans = (nix_const >> 16) & 0xFFULL;
337 	nr_sdp_chans = nix_const1 & 0xFFFULL;
338 	nr_cgx_chans = nix_const & 0xFFULL;
339 	nr_cpt_chans = (nix_const >> 32) & 0xFFFULL;
340 
341 	sdp_chan_base = hw->lbk_chan_base + hw->lbk_links * nr_lbk_chans;
342 	/* Round up base channel to multiple of number of channels */
343 	hw->sdp_chan_base = ALIGN(sdp_chan_base, nr_sdp_chans);
344 
345 	cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * nr_sdp_chans;
346 	hw->cgx_chan_base = ALIGN(cgx_chan_base, nr_cgx_chans);
347 
348 	cpt_chan_base = hw->cgx_chan_base + hw->cgx_links * nr_cgx_chans;
349 	hw->cpt_chan_base = ALIGN(cpt_chan_base, nr_cpt_chans);
350 
351 	/* Out of 4096 channels, start CPT from 2048 so
352 	 * that the MSB of CPT channel numbers is always set.
353 	 */
354 	if (cpt_chan_base <= NIX_CHAN_CPT_CH_START) {
355 		hw->cpt_chan_base = NIX_CHAN_CPT_CH_START;
356 	} else {
357 		dev_err(rvu->dev,
358 			"CPT channels could not fit in the range 2048-4095\n");
359 		return -EINVAL;
360 	}
361 
362 	return 0;
363 }
364 
365 #define LBK_CONNECT_NIXX(a)		(0x0 + (a))
366 
367 static void __rvu_lbk_set_chans(struct rvu *rvu, void __iomem *base,
368 				u64 offset, int lbkid, u16 chans)
369 {
370 	struct rvu_hwinfo *hw = rvu->hw;
371 	u64 cfg;
372 
373 	cfg = readq(base + offset);
374 	cfg &= ~(LBK_LINK_CFG_RANGE_MASK |
375 		 LBK_LINK_CFG_ID_MASK | LBK_LINK_CFG_BASE_MASK);
376 	cfg |=	FIELD_PREP(LBK_LINK_CFG_RANGE_MASK, ilog2(chans));
377 	cfg |=	FIELD_PREP(LBK_LINK_CFG_ID_MASK, lbkid);
378 	cfg |=	FIELD_PREP(LBK_LINK_CFG_BASE_MASK, hw->lbk_chan_base);
379 
380 	writeq(cfg, base + offset);
381 }
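/* For example (hypothetical values): with chans = 64, lbkid = 1 and
 * lbk_chan_base = 0x80, the helper above programs RANGE = ilog2(64) = 6,
 * ID = 1 and BASE = 0x80, i.e. channels 0x80-0xbf on that LBK link.
 */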
382 
383 static void rvu_lbk_set_channels(struct rvu *rvu)
384 {
385 	struct pci_dev *pdev = NULL;
386 	void __iomem *base;
387 	u64 lbk_const;
388 	u8 src, dst;
389 	u16 chans;
390 
391 	/* To loopback packets between multiple NIX blocks,
392 	 * multiple LBK blocks are needed. With two NIX blocks,
393 	 * four LBK blocks are needed and each LBK block's
394 	 * source and destination are as follows:
395 	 * LBK0 - source NIX0 and destination NIX0
396 	 * LBK1 - source NIX0 and destination NIX1
397 	 * LBK2 - source NIX1 and destination NIX0
398 	 * LBK3 - source NIX1 and destination NIX1
399 	 * As per the HRM, channel numbers should be programmed as:
400 	 * P2X and X2P of LBK0 the same
401 	 * P2X and X2P of LBK3 the same
402 	 * P2X of LBK1 and X2P of LBK2 the same
403 	 * P2X of LBK2 and X2P of LBK1 the same
404 	 */
405 	while (true) {
406 		pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
407 				      PCI_DEVID_OCTEONTX2_LBK, pdev);
408 		if (!pdev)
409 			return;
410 
411 		base = pci_ioremap_bar(pdev, 0);
412 		if (!base)
413 			goto err_put;
414 
415 		lbk_const = readq(base + LBK_CONST);
416 		chans = FIELD_GET(LBK_CONST_CHANS, lbk_const);
417 		dst = FIELD_GET(LBK_CONST_DST, lbk_const);
418 		src = FIELD_GET(LBK_CONST_SRC, lbk_const);
419 
420 		if (src == dst) {
421 			if (src == LBK_CONNECT_NIXX(0)) { /* LBK0 */
422 				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
423 						    0, chans);
424 				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
425 						    0, chans);
426 			} else if (src == LBK_CONNECT_NIXX(1)) { /* LBK3 */
427 				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
428 						    1, chans);
429 				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
430 						    1, chans);
431 			}
432 		} else {
433 			if (src == LBK_CONNECT_NIXX(0)) { /* LBK1 */
434 				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
435 						    0, chans);
436 				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
437 						    1, chans);
438 			} else if (src == LBK_CONNECT_NIXX(1)) { /* LBK2 */
439 				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
440 						    1, chans);
441 				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
442 						    0, chans);
443 			}
444 		}
445 		iounmap(base);
446 	}
447 err_put:
448 	pci_dev_put(pdev);
449 }
450 
451 static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr)
452 {
453 	u64 nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
454 	u64 nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
455 	u16 cgx_chans, lbk_chans, sdp_chans, cpt_chans;
456 	struct rvu_hwinfo *hw = rvu->hw;
457 	int link, nix_link = 0;
458 	u16 start;
459 	u64 cfg;
460 
461 	cgx_chans = nix_const & 0xFFULL;
462 	lbk_chans = (nix_const >> 16) & 0xFFULL;
463 	sdp_chans = nix_const1 & 0xFFFULL;
464 	cpt_chans = (nix_const >> 32) & 0xFFFULL;
465 
466 	start = hw->cgx_chan_base;
467 	for (link = 0; link < hw->cgx_links; link++, nix_link++) {
468 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
469 		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
470 		cfg |=	FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cgx_chans));
471 		cfg |=	FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
472 		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
473 		start += cgx_chans;
474 	}
475 
476 	start = hw->lbk_chan_base;
477 	for (link = 0; link < hw->lbk_links; link++, nix_link++) {
478 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
479 		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
480 		cfg |=	FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(lbk_chans));
481 		cfg |=	FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
482 		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
483 		start += lbk_chans;
484 	}
485 
486 	start = hw->sdp_chan_base;
487 	for (link = 0; link < hw->sdp_links; link++, nix_link++) {
488 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
489 		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
490 		cfg |=	FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(sdp_chans));
491 		cfg |=	FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
492 		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
493 		start += sdp_chans;
494 	}
495 
496 	start = hw->cpt_chan_base;
497 	for (link = 0; link < hw->cpt_links; link++, nix_link++) {
498 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
499 		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
500 		cfg |=	FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cpt_chans));
501 		cfg |=	FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
502 		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
503 		start += cpt_chans;
504 	}
505 }
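/* For example (hypothetical values): with cgx_chan_base = 0x100 and
 * cgx_chans = 16, the first loop above programs CGX link 0 with
 * BASE = 0x100 and RANGE = ilog2(16) = 4, link 1 with BASE = 0x110, and
 * so on, each link owning a contiguous block of 16 channels.
 */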
506 
507 static void rvu_nix_set_channels(struct rvu *rvu)
508 {
509 	int blkaddr = 0;
510 
511 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
512 	while (blkaddr) {
513 		__rvu_nix_set_channels(rvu, blkaddr);
514 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
515 	}
516 }
517 
518 static void __rvu_rpm_set_channels(int cgxid, int lmacid, u16 base)
519 {
520 	u64 cfg;
521 
522 	cfg = cgx_lmac_read(cgxid, lmacid, RPMX_CMRX_LINK_CFG);
523 	cfg &= ~(RPMX_CMRX_LINK_BASE_MASK | RPMX_CMRX_LINK_RANGE_MASK);
524 
525 	/* There is no read-only constant register from which to read
526 	 * the number of channels per LMAC; it is always 16.
527 	 */
528 	cfg |=	FIELD_PREP(RPMX_CMRX_LINK_RANGE_MASK, ilog2(16));
529 	cfg |=	FIELD_PREP(RPMX_CMRX_LINK_BASE_MASK, base);
530 	cgx_lmac_write(cgxid, lmacid, RPMX_CMRX_LINK_CFG, cfg);
531 }
532 
533 static void rvu_rpm_set_channels(struct rvu *rvu)
534 {
535 	struct rvu_hwinfo *hw = rvu->hw;
536 	u16 base = hw->cgx_chan_base;
537 	int cgx, lmac;
538 
539 	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
540 		for (lmac = 0; lmac < hw->lmac_per_cgx; lmac++) {
541 			__rvu_rpm_set_channels(cgx, lmac, base);
542 			base += 16;
543 		}
544 	}
545 }
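/* For example (hypothetical base): with cgx_chan_base = 0x100, CGX0/LMAC0
 * is programmed with base 0x100, CGX0/LMAC1 with 0x110, CGX0/LMAC2 with
 * 0x120, ..., advancing by the fixed 16 channels per LMAC.
 */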
546 
547 void rvu_program_channels(struct rvu *rvu)
548 {
549 	struct rvu_hwinfo *hw = rvu->hw;
550 
551 	if (!hw->cap.programmable_chans)
552 		return;
553 
554 	rvu_nix_set_channels(rvu);
555 	rvu_lbk_set_channels(rvu);
556 	rvu_rpm_set_channels(rvu);
557 }
558 
559 void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw)
560 {
561 	int blkaddr = nix_hw->blkaddr;
562 	u64 cfg;
563 
564 	/* Set the AF vWQE timer interval to an LF-configurable range of
565 	 * 6.4us to 1.632ms.
566 	 */
567 	rvu_write64(rvu, blkaddr, NIX_AF_VWQE_TIMER, 0x3FULL);
568 
569 	/* Enable NIX RX stream and global conditional clock to
570 	 * avoid multiple frees of NPA buffers.
571 	 */
572 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CFG);
573 	cfg |= BIT_ULL(1) | BIT_ULL(2);
574 	rvu_write64(rvu, blkaddr, NIX_AF_CFG, cfg);
575 }
576