// SPDX-License-Identifier: GPL-2.0
/* Marvell RPM CN10K driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <linux/bitfield.h>
#include <linux/pci.h>
#include "rvu.h"
#include "cgx.h"
#include "rvu_reg.h"

int rvu_set_channels_base(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 cpt_chan_base;
	u64 nix_const;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return blkaddr;

	nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);

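	/* NIX_AF_CONST layout as decoded in this file (see the shifts
	 * and masks below and in __rvu_nix_set_channels()):
	 *   [7:0]   channels per CGX LMAC link
	 *   [11:8]  LMACs per CGX
	 *   [15:12] number of CGX blocks
	 *   [23:16] channels per LBK link
	 *   [27:24] number of LBK links
	 *   [43:32] number of CPT channels
	 *   [47:44] number of CPT links
	 *   [60]    channels are programmable
	 */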
	hw->cgx = (nix_const >> 12) & 0xFULL;
	hw->lmac_per_cgx = (nix_const >> 8) & 0xFULL;
	hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
	hw->lbk_links = (nix_const >> 24) & 0xFULL;
	hw->cpt_links = (nix_const >> 44) & 0xFULL;
	hw->sdp_links = 1;

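	/* Fixed channel map defaults, used when channels are not programmable */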
	hw->cgx_chan_base = NIX_CHAN_CGX_LMAC_CHX(0, 0, 0);
	hw->lbk_chan_base = NIX_CHAN_LBK_CHX(0, 0);
	hw->sdp_chan_base = NIX_CHAN_SDP_CH_START;

	/* No programmable channels */
	if (!(nix_const & BIT_ULL(60)))
		return 0;

	hw->cap.programmable_chans = true;

	/* If programmable channels are present, lay the channel numbers
	 * out contiguously with no holes so that the new CPT channels
	 * can be accommodated. Channel numbers are assigned in the
	 * order LBK, SDP, CGX and then CPT.
	 */
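	/* Resulting map (sketch; per-link channel counts are read from
	 * NIX_AF_CONST at runtime):
	 *   lbk_chan_base : lbk_links * channels-per-LBK-link
	 *   sdp_chan_base : sdp_links * SDP_CHANNELS
	 *   cgx_chan_base : cgx_links * channels-per-LMAC-link
	 *   0x800 - 0xFFF : CPT channels (forced to the upper half below)
	 */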
	hw->sdp_chan_base = hw->lbk_chan_base + hw->lbk_links *
				((nix_const >> 16) & 0xFFULL);
	hw->cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * SDP_CHANNELS;

	cpt_chan_base = hw->cgx_chan_base + hw->cgx_links *
				(nix_const & 0xFFULL);

	/* Out of 4096 channels, start CPT from 2048 so that the MSB
	 * of every CPT channel number is always set.
	 */
	if (cpt_chan_base <= 0x800) {
		hw->cpt_chan_base = 0x800;
	} else {
		dev_err(rvu->dev,
			"CPT channels could not fit in the range 2048-4095\n");
		return -EINVAL;
	}

	return 0;
}

#define LBK_CONNECT_NIXX(a)		(0x0 + (a))

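/* Program one LBK link config register: the channel range is encoded
 * as log2(chans), the link is tied to NIX block 'lbkid', and the base
 * is the global LBK channel base computed in rvu_set_channels_base().
 */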
static void __rvu_lbk_set_chans(struct rvu *rvu, void __iomem *base,
				u64 offset, int lbkid, u16 chans)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u64 cfg;

	cfg = readq(base + offset);
	cfg &= ~(LBK_LINK_CFG_RANGE_MASK |
		 LBK_LINK_CFG_ID_MASK | LBK_LINK_CFG_BASE_MASK);
	cfg |= FIELD_PREP(LBK_LINK_CFG_RANGE_MASK, ilog2(chans));
	cfg |= FIELD_PREP(LBK_LINK_CFG_ID_MASK, lbkid);
	cfg |= FIELD_PREP(LBK_LINK_CFG_BASE_MASK, hw->lbk_chan_base);

	writeq(cfg, base + offset);
}

static void rvu_lbk_set_channels(struct rvu *rvu)
{
	struct pci_dev *pdev = NULL;
	void __iomem *base;
	u64 lbk_const;
	u8 src, dst;
	u16 chans;

	/* To loopback packets between multiple NIX blocks,
	 * multiple LBK blocks are needed. With two NIX blocks,
	 * four LBK blocks are needed and each LBK block's
	 * source and destination are as follows:
	 * LBK0 - source NIX0 and destination NIX0
	 * LBK1 - source NIX0 and destination NIX1
	 * LBK2 - source NIX1 and destination NIX0
	 * LBK3 - source NIX1 and destination NIX1
	 * As per the HRM, channel numbers should be programmed as:
	 * P2X and X2P of LBK0 as same
	 * P2X and X2P of LBK3 as same
	 * P2X of LBK1 and X2P of LBK2 as same
	 * P2X of LBK2 and X2P of LBK1 as same
	 */
	while (true) {
		pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
				      PCI_DEVID_OCTEONTX2_LBK, pdev);
		if (!pdev)
			return;

		base = pci_ioremap_bar(pdev, 0);
		if (!base)
			goto err_put;

		lbk_const = readq(base + LBK_CONST);
		chans = FIELD_GET(LBK_CONST_CHANS, lbk_const);
		dst = FIELD_GET(LBK_CONST_DST, lbk_const);
		src = FIELD_GET(LBK_CONST_SRC, lbk_const);

		if (src == dst) {
			if (src == LBK_CONNECT_NIXX(0)) { /* LBK0 */
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    0, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    0, chans);
			} else if (src == LBK_CONNECT_NIXX(1)) { /* LBK3 */
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    1, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    1, chans);
			}
		} else {
			if (src == LBK_CONNECT_NIXX(0)) { /* LBK1 */
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    0, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    1, chans);
			} else if (src == LBK_CONNECT_NIXX(1)) { /* LBK2 */
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    1, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    0, chans);
			}
		}
		iounmap(base);
	}
err_put:
	pci_dev_put(pdev);
}

static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr)
{
	u64 nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	u16 cgx_chans, lbk_chans, sdp_chans, cpt_chans;
	struct rvu_hwinfo *hw = rvu->hw;
	int link, nix_link = 0;
	u16 start;
	u64 cfg;

	cgx_chans = nix_const & 0xFFULL;
	lbk_chans = (nix_const >> 16) & 0xFFULL;
	sdp_chans = SDP_CHANNELS;
	cpt_chans = (nix_const >> 32) & 0xFFFULL;

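	/* NIX_AF_LINKX_CFG encodes the channel count as log2(), so each
	 * link gets a power-of-two sized range starting at 'start', which
	 * advances contiguously within each link type below.
	 */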
	start = hw->cgx_chan_base;
	for (link = 0; link < hw->cgx_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cgx_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += cgx_chans;
	}

	start = hw->lbk_chan_base;
	for (link = 0; link < hw->lbk_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(lbk_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += lbk_chans;
	}

	start = hw->sdp_chan_base;
	for (link = 0; link < hw->sdp_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(sdp_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += sdp_chans;
	}

	start = hw->cpt_chan_base;
	for (link = 0; link < hw->cpt_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cpt_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += cpt_chans;
	}
}

static void rvu_nix_set_channels(struct rvu *rvu)
{
	int blkaddr = 0;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		__rvu_nix_set_channels(rvu, blkaddr);
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
}

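/* Program the RPM (CGX) side of one LMAC link with the same channel
 * base/range numbering that the NIX side uses for its CGX links.
 */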
static void __rvu_rpm_set_channels(int cgxid, int lmacid, u16 base)
{
	u64 cfg;

	cfg = cgx_lmac_read(cgxid, lmacid, RPMX_CMRX_LINK_CFG);
	cfg &= ~(RPMX_CMRX_LINK_BASE_MASK | RPMX_CMRX_LINK_RANGE_MASK);

	/* There is no read-only constant register advertising the
	 * number of channels per LMAC; it is always 16.
	 */
	cfg |= FIELD_PREP(RPMX_CMRX_LINK_RANGE_MASK, ilog2(16));
	cfg |= FIELD_PREP(RPMX_CMRX_LINK_BASE_MASK, base);
	cgx_lmac_write(cgxid, lmacid, RPMX_CMRX_LINK_CFG, cfg);
}

static void rvu_rpm_set_channels(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 base = hw->cgx_chan_base;
	int cgx, lmac;

	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
		for (lmac = 0; lmac < hw->lmac_per_cgx; lmac++) {
			__rvu_rpm_set_channels(cgx, lmac, base);
			base += 16;
		}
	}
}

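/* Entry point: a no-op unless rvu_set_channels_base() detected
 * programmable channels (NIX_AF_CONST bit 60) and computed the bases.
 */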
void rvu_program_channels(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	if (!hw->cap.programmable_chans)
		return;

	rvu_nix_set_channels(rvu);
	rvu_lbk_set_channels(rvu);
	rvu_rpm_set_channels(rvu);
}