// SPDX-License-Identifier: GPL-2.0
/*  Marvell RPM CN10K driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <linux/bitfield.h>
#include <linux/pci.h>
#include "rvu.h"
#include "cgx.h"
#include "rvu_reg.h"

/* RVU LMTST */
#define LMT_TBL_OP_READ		0
#define LMT_TBL_OP_WRITE	1
#define LMT_MAP_TABLE_SIZE	(128 * 1024)
#define LMT_MAPTBL_ENTRY_SIZE	16

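/* The LMT map table lives in memory pointed to by APR_AF_LMT_MAP_BASE.
 * Each PF/VF owns one LMT_MAPTBL_ENTRY_SIZE byte APR_LMT_MAP_ENTRY_S
 * entry; the 'index' passed to the helper below is the byte offset of
 * that entry within the table.
 */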
/* Function to perform operations (read/write) on lmtst map table */
static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
			       int lmt_tbl_op)
{
	void __iomem *lmt_map_base;
	u64 tbl_base;

	tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);

	lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
	if (!lmt_map_base) {
		dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
		return -ENOMEM;
	}

	if (lmt_tbl_op == LMT_TBL_OP_READ) {
		*val = readq(lmt_map_base + index);
	} else {
		writeq((*val), (lmt_map_base + index));
		/* Flush the AP interceptor cache so the APR_LMT_MAP_ENTRY_S
		 * change takes effect: write 1 to flush, read the register
		 * back to act as a barrier and set up a data dependency,
		 * then write 0 to complete the flush.
		 */
		rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0));
		rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL);
		rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00);
	}

	iounmap(lmt_map_base);
	return 0;
}

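/* Byte offset of this PF/VF's entry in the LMT map table: entries are
 * grouped per PF, with hw->total_vfs slots per PF and one slot per
 * function. Worked example with a hypothetical total_vfs of 128:
 * PF2, func 1 (its first VF) maps to (2 * 128 + 1) * 16 = byte offset 4112.
 */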
static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
{
	return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
		(pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
}

static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
			   u64 iova, u64 *lmt_addr)
{
	u64 pa, val, pf;
	int err;

	if (!iova) {
		dev_err(rvu->dev, "%s Requested NULL address for translation\n", __func__);
		return -EINVAL;
	}

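	/* Ask the RVU SMMU widget to translate the IOVA on behalf of this
	 * PF/VF: program the IOVA, kick a translation request, poll for
	 * completion and then pull the translated PA out of
	 * RVU_AF_SMMU_TLN_FLIT0.
	 */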
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
	pf = rvu_get_pf(pcifunc) & 0x1F;
	val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
	      ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);

	err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false);
	if (err) {
		dev_err(rvu->dev, "%s LMTLINE iova translation failed\n", __func__);
		return err;
	}
	val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS);
	if (val & ~0x1ULL) {
		dev_err(rvu->dev, "%s LMTLINE iova translation failed err:%llx\n", __func__, val);
		return -EIO;
	}
	/* PA[51:12] = RVU_AF_SMMU_TLN_FLIT0[57:18]
	 * PA[11:0] = IOVA[11:0]
	 */
	pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT0) >> 18;
	pa &= GENMASK_ULL(39, 0);
	*lmt_addr = (pa << 12) | (iova & 0xFFF);

	return 0;
}

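/* Install 'lmt_addr' as the LMT base of 'pcifunc' in the map table. The
 * original entry is saved on first use so that rvu_reset_lmt_map_tbl()
 * can restore it on FLR.
 */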
static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	u32 tbl_idx;
	int err = 0;
	u64 val;

	/* Read the current lmt addr of pcifunc */
	tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
	err = lmtst_map_table_ops(rvu, tbl_idx, &val, LMT_TBL_OP_READ);
	if (err) {
		dev_err(rvu->dev,
			"Failed to read LMT map table: index 0x%x err %d\n",
			tbl_idx, err);
		return err;
	}

	/* Store the secondary's lmt base address as this needs to be
	 * reverted in FLR. Also make sure this default value doesn't
	 * get overwritten on multiple calls to this mailbox.
	 */
	if (!pfvf->lmt_base_addr)
		pfvf->lmt_base_addr = val;

	/* Update the LMT table with new addr */
	err = lmtst_map_table_ops(rvu, tbl_idx, &lmt_addr, LMT_TBL_OP_WRITE);
	if (err) {
		dev_err(rvu->dev,
			"Failed to update LMT map table: index 0x%x err %d\n",
			tbl_idx, err);
		return err;
	}
	return 0;
}

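/* Mailbox handler with two (not mutually exclusive) modes of operation:
 * - use_local_lmt_region: translate the PF/VF supplied lmt_iova to a
 *   physical address and install it as this PF/VF's LMT base.
 * - base_pcifunc: share the base (primary) pcifunc's LMT region by
 *   copying its map table entry into the requesting (secondary)
 *   pcifunc's entry.
 */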
int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu,
				     struct lmtst_tbl_setup_req *req,
				     struct msg_rsp *rsp)
{
	u64 lmt_addr, val;
	u32 pri_tbl_idx;
	int err = 0;

	/* Check if PF_FUNC wants to use its own local memory as LMTLINE
	 * region; if so, convert that IOVA to a physical address and
	 * populate the LMT table with that address.
	 */
	if (req->use_local_lmt_region) {
		err = rvu_get_lmtaddr(rvu, req->hdr.pcifunc,
				      req->lmt_iova, &lmt_addr);
		if (err < 0)
			return err;

		/* Update the lmt addr for this PFFUNC in the LMT table */
		err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, lmt_addr);
		if (err)
			return err;
	}

	/* Reconfigure the lmtst map table for lmt region shared mode, i.e.
	 * make multiple PF_FUNCs share one LMTLINE region. The primary/base
	 * pcifunc (passed as an argument in the mailbox request) is the one
	 * whose lmt base address will be shared with the secondary pcifunc
	 * (the one calling this mailbox).
	 */
	if (req->base_pcifunc) {
		/* Calculating the LMT table index equivalent to primary
		 * pcifunc.
		 */
		pri_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->base_pcifunc);

		/* Read the base lmt addr of the primary pcifunc */
		err = lmtst_map_table_ops(rvu, pri_tbl_idx, &val,
					  LMT_TBL_OP_READ);
		if (err) {
			dev_err(rvu->dev,
				"Failed to read LMT map table: index 0x%x err %d\n",
				pri_tbl_idx, err);
			return err;
		}

		/* Update the base lmt addr of secondary with primary's base
		 * lmt addr.
		 */
		err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, val);
		if (err)
			return err;
	}

	return 0;
}

/* Resetting the lmtst map table to original base addresses */
void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	u32 tbl_idx;
	int err;

	if (is_rvu_otx2(rvu))
		return;

	if (pfvf->lmt_base_addr) {
		/* This corresponds to lmt map table index */
		tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
		/* Reverting back original lmt base addr for respective
		 * pcifunc.
		 */
		err = lmtst_map_table_ops(rvu, tbl_idx, &pfvf->lmt_base_addr,
					  LMT_TBL_OP_WRITE);
		if (err)
			dev_err(rvu->dev,
				"Failed to update LMT map table: index 0x%x err %d\n",
				tbl_idx, err);
		pfvf->lmt_base_addr = 0;
	}
}

int rvu_set_channels_base(struct rvu *rvu)
{
	u16 nr_lbk_chans, nr_sdp_chans, nr_cgx_chans, nr_cpt_chans;
	u16 sdp_chan_base, cgx_chan_base, cpt_chan_base;
	struct rvu_hwinfo *hw = rvu->hw;
	u64 nix_const, nix_const1;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return blkaddr;

	nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);

	hw->cgx = (nix_const >> 12) & 0xFULL;
	hw->lmac_per_cgx = (nix_const >> 8) & 0xFULL;
	hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
	hw->lbk_links = (nix_const >> 24) & 0xFULL;
	hw->cpt_links = (nix_const >> 44) & 0xFULL;
	hw->sdp_links = 1;

	hw->cgx_chan_base = NIX_CHAN_CGX_LMAC_CHX(0, 0, 0);
	hw->lbk_chan_base = NIX_CHAN_LBK_CHX(0, 0);
	hw->sdp_chan_base = NIX_CHAN_SDP_CH_START;

	/* No Programmable channels */
	if (!(nix_const & BIT_ULL(60)))
		return 0;

	hw->cap.programmable_chans = true;

	/* If programmable channels are present then configure the
	 * channels such that all channel numbers are contiguous,
	 * leaving no holes. This way the new CPT channels can be
	 * accommodated. Channel numbers are assigned in the order
	 * LBK, SDP, CGX and CPT. Also, the base channel number of
	 * a block must be a multiple of the block's channel count.
	 */
	nr_lbk_chans = (nix_const >> 16) & 0xFFULL;
	nr_sdp_chans = nix_const1 & 0xFFFULL;
	nr_cgx_chans = nix_const & 0xFFULL;
	nr_cpt_chans = (nix_const >> 32) & 0xFFFULL;

	sdp_chan_base = hw->lbk_chan_base + hw->lbk_links * nr_lbk_chans;
	/* Round up base channel to multiple of number of channels */
	hw->sdp_chan_base = ALIGN(sdp_chan_base, nr_sdp_chans);

	cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * nr_sdp_chans;
	hw->cgx_chan_base = ALIGN(cgx_chan_base, nr_cgx_chans);

	cpt_chan_base = hw->cgx_chan_base + hw->cgx_links * nr_cgx_chans;
	hw->cpt_chan_base = ALIGN(cpt_chan_base, nr_cpt_chans);
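
	/* Worked example with hypothetical counts: lbk_chan_base = 0 and
	 * two LBK links of 16 channels each put sdp_chan_base at 32; if
	 * nr_sdp_chans were 64, ALIGN(32, 64) rounds the SDP base up to 64.
	 * ALIGN() expects a power-of-two alignment, which the per-block
	 * channel counts are assumed to be.
	 */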

	/* Out of 4096 channels, start CPT from 2048 so that the MSB of
	 * CPT channel numbers is always set.
	 */
	if (cpt_chan_base <= 0x800) {
		hw->cpt_chan_base = 0x800;
	} else {
		dev_err(rvu->dev,
			"CPT channels could not fit in the range 2048-4095\n");
		return -EINVAL;
	}

	return 0;
}

#define LBK_CONNECT_NIXX(a)		(0x0 + (a))

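/* Program one direction (X2P or P2X) of an LBK link: RANGE is log2 of the
 * number of channels, ID selects which NIX block the link is tied to and
 * BASE is the first channel number of the LBK window.
 */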
static void __rvu_lbk_set_chans(struct rvu *rvu, void __iomem *base,
				u64 offset, int lbkid, u16 chans)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u64 cfg;

	cfg = readq(base + offset);
	cfg &= ~(LBK_LINK_CFG_RANGE_MASK |
		 LBK_LINK_CFG_ID_MASK | LBK_LINK_CFG_BASE_MASK);
	cfg |= FIELD_PREP(LBK_LINK_CFG_RANGE_MASK, ilog2(chans));
	cfg |= FIELD_PREP(LBK_LINK_CFG_ID_MASK, lbkid);
	cfg |= FIELD_PREP(LBK_LINK_CFG_BASE_MASK, hw->lbk_chan_base);

	writeq(cfg, base + offset);
}

static void rvu_lbk_set_channels(struct rvu *rvu)
{
	struct pci_dev *pdev = NULL;
	void __iomem *base;
	u64 lbk_const;
	u8 src, dst;
	u16 chans;

	/* To loopback packets between multiple NIX blocks,
	 * multiple LBK blocks are needed. With two NIX blocks,
	 * four LBK blocks are needed and each LBK block's
	 * source and destination are as follows:
	 * LBK0 - source NIX0 and destination NIX0
	 * LBK1 - source NIX0 and destination NIX1
	 * LBK2 - source NIX1 and destination NIX0
	 * LBK3 - source NIX1 and destination NIX1
	 * As per the HRM, channel numbers should be programmed as:
	 * P2X and X2P of LBK0 the same
	 * P2X and X2P of LBK3 the same
	 * P2X of LBK1 and X2P of LBK2 the same
	 * P2X of LBK2 and X2P of LBK1 the same
	 */
	while (true) {
		pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
				      PCI_DEVID_OCTEONTX2_LBK, pdev);
		if (!pdev)
			return;

		base = pci_ioremap_bar(pdev, 0);
		if (!base)
			goto err_put;

		lbk_const = readq(base + LBK_CONST);
		chans = FIELD_GET(LBK_CONST_CHANS, lbk_const);
		dst = FIELD_GET(LBK_CONST_DST, lbk_const);
		src = FIELD_GET(LBK_CONST_SRC, lbk_const);

		if (src == dst) {
			if (src == LBK_CONNECT_NIXX(0)) { /* LBK0 */
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    0, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    0, chans);
			} else if (src == LBK_CONNECT_NIXX(1)) { /* LBK3 */
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    1, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    1, chans);
			}
		} else {
			if (src == LBK_CONNECT_NIXX(0)) { /* LBK1 */
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    0, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    1, chans);
			} else if (src == LBK_CONNECT_NIXX(1)) { /* LBK2 */
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    1, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    0, chans);
			}
		}
		iounmap(base);
	}
err_put:
	pci_dev_put(pdev);
}

static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr)
{
	u64 nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	u64 nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	u16 cgx_chans, lbk_chans, sdp_chans, cpt_chans;
	struct rvu_hwinfo *hw = rvu->hw;
	int link, nix_link = 0;
	u16 start;
	u64 cfg;

	cgx_chans = nix_const & 0xFFULL;
	lbk_chans = (nix_const >> 16) & 0xFFULL;
	sdp_chans = nix_const1 & 0xFFFULL;
	cpt_chans = (nix_const >> 32) & 0xFFFULL;

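	/* Program every NIX link's channel window (base + log2 range) into
	 * NIX_AF_LINKX_CFG, walking the links in the fixed order CGX, LBK,
	 * SDP and then CPT; nix_link keeps counting across the groups.
	 */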
	start = hw->cgx_chan_base;
	for (link = 0; link < hw->cgx_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cgx_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += cgx_chans;
	}

	start = hw->lbk_chan_base;
	for (link = 0; link < hw->lbk_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(lbk_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += lbk_chans;
	}

	start = hw->sdp_chan_base;
	for (link = 0; link < hw->sdp_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(sdp_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += sdp_chans;
	}

	start = hw->cpt_chan_base;
	for (link = 0; link < hw->cpt_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cpt_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += cpt_chans;
	}
}

static void rvu_nix_set_channels(struct rvu *rvu)
{
	int blkaddr = 0;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		__rvu_nix_set_channels(rvu, blkaddr);
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
}

static void __rvu_rpm_set_channels(int cgxid, int lmacid, u16 base)
{
	u64 cfg;

	cfg = cgx_lmac_read(cgxid, lmacid, RPMX_CMRX_LINK_CFG);
	cfg &= ~(RPMX_CMRX_LINK_BASE_MASK | RPMX_CMRX_LINK_RANGE_MASK);

	/* There is no read-only constant register reporting the number of
	 * channels per LMAC; it is always 16.
	 */
	cfg |= FIELD_PREP(RPMX_CMRX_LINK_RANGE_MASK, ilog2(16));
	cfg |= FIELD_PREP(RPMX_CMRX_LINK_BASE_MASK, base);
	cgx_lmac_write(cgxid, lmacid, RPMX_CMRX_LINK_CFG, cfg);
}

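/* Give each CGX/RPM LMAC a 16-channel window, packed back-to-back starting
 * at cgx_chan_base; this should line up with the per-link bases programmed
 * into the NIX CGX links above.
 */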
static void rvu_rpm_set_channels(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 base = hw->cgx_chan_base;
	int cgx, lmac;

	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
		for (lmac = 0; lmac < hw->lmac_per_cgx; lmac++) {
			__rvu_rpm_set_channels(cgx, lmac, base);
			base += 16;
		}
	}
}

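/* Entry point: channels are (re)programmed only on silicon that advertises
 * programmable channels (NIX_AF_CONST bit 60, checked in
 * rvu_set_channels_base()); otherwise the hardware reset values are kept.
 */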
void rvu_program_channels(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	if (!hw->cap.programmable_chans)
		return;

	rvu_nix_set_channels(rvu);
	rvu_lbk_set_channels(rvu);
	rvu_rpm_set_channels(rvu);
}