xref: /openbmc/linux/drivers/soc/bcm/brcmstb/biuctrl.c (revision c3fbced9)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom STB SoCs Bus Unit Interface controls
 *
 * Copyright (C) 2015, Broadcom Corporation
 */

#define pr_fmt(fmt)	"brcmstb: " KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
#include <linux/soc/brcmstb/brcmstb.h>

#define RACENPREF_MASK			0x3
#define RACPREFINST_SHIFT		0
#define RACENINST_SHIFT			2
#define RACPREFDATA_SHIFT		4
#define RACENDATA_SHIFT			6
#define RAC_CPU_SHIFT			8
#define RACCFG_MASK			0xff
#define DPREF_LINE_2_SHIFT		24
#define DPREF_LINE_2_MASK		0xff

/* Bitmask to enable instruction and data prefetching with a 256-byte stride */
#define RAC_DATA_INST_EN_MASK		(1 << RACPREFINST_SHIFT | \
					 RACENPREF_MASK << RACENINST_SHIFT | \
					 1 << RACPREFDATA_SHIFT | \
					 RACENPREF_MASK << RACENDATA_SHIFT)
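/* For CPU0 this mask evaluates to 0xdd; it is shifted left by RAC_CPU_SHIFT
 * (8) for each additional CPU when programming RAC_CONFIG0_REG below.
 */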

#define CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK	0x70000000
#define CPU_CREDIT_REG_MCPx_READ_CRED_MASK	0xf
#define CPU_CREDIT_REG_MCPx_WRITE_CRED_MASK	0xf
#define CPU_CREDIT_REG_MCPx_READ_CRED_SHIFT(x)	((x) * 8)
#define CPU_CREDIT_REG_MCPx_WRITE_CRED_SHIFT(x)	(((x) * 8) + 4)
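/* Example: MCP interface 1 keeps its read credits in bits [11:8] and its
 * write credits in bits [15:12] of CPU_CREDIT_REG.
 */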

#define CPU_MCP_FLOW_REG_MCPx_RDBUFF_CRED_SHIFT(x)	((x) * 8)
#define CPU_MCP_FLOW_REG_MCPx_RDBUFF_CRED_MASK		0xff

#define CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_THRESHOLD_MASK	0xf
#define CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_TIMEOUT_MASK		0xf
#define CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_TIMEOUT_SHIFT	4
#define CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_ENABLE		BIT(8)

static void __iomem *cpubiuctrl_base;
static bool mcp_wr_pairing_en;
static const int *cpubiuctrl_regs;

enum cpubiuctrl_regs {
	CPU_CREDIT_REG = 0,
	CPU_MCP_FLOW_REG,
	CPU_WRITEBACK_CTRL_REG,
	RAC_CONFIG0_REG,
	RAC_CONFIG1_REG,
	NUM_CPU_BIUCTRL_REGS,
};

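/*
 * Register accessors: an offset of -1 marks a register that does not exist
 * on this chip.  The RAC configuration registers are also left untouched
 * whenever CONFIG_CACHE_B15_RAC is enabled, presumably because the B15
 * read-ahead cache driver manages them in that configuration.
 */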
static inline u32 cbc_readl(int reg)
{
	int offset = cpubiuctrl_regs[reg];

	if (offset == -1 ||
	    (IS_ENABLED(CONFIG_CACHE_B15_RAC) && reg >= RAC_CONFIG0_REG))
		return (u32)-1;

	return readl_relaxed(cpubiuctrl_base + offset);
}

static inline void cbc_writel(u32 val, int reg)
{
	int offset = cpubiuctrl_regs[reg];

	if (offset == -1 ||
	    (IS_ENABLED(CONFIG_CACHE_B15_RAC) && reg >= RAC_CONFIG0_REG))
		return;

	writel(val, cpubiuctrl_base + offset);
}

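/* Per-CPU-type register offset tables; -1 flags a register that the given
 * BIU variant does not implement.
 */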
static const int b15_cpubiuctrl_regs[] = {
	[CPU_CREDIT_REG] = 0x184,
	[CPU_MCP_FLOW_REG] = -1,
	[CPU_WRITEBACK_CTRL_REG] = -1,
	[RAC_CONFIG0_REG] = -1,
	[RAC_CONFIG1_REG] = -1,
};

/* Odd cases, e.g.: 7260A0 */
static const int b53_cpubiuctrl_no_wb_regs[] = {
	[CPU_CREDIT_REG] = 0x0b0,
	[CPU_MCP_FLOW_REG] = 0x0b4,
	[CPU_WRITEBACK_CTRL_REG] = -1,
	[RAC_CONFIG0_REG] = 0x78,
	[RAC_CONFIG1_REG] = 0x7c,
};

static const int b53_cpubiuctrl_regs[] = {
	[CPU_CREDIT_REG] = 0x0b0,
	[CPU_MCP_FLOW_REG] = 0x0b4,
	[CPU_WRITEBACK_CTRL_REG] = 0x22c,
	[RAC_CONFIG0_REG] = 0x78,
	[RAC_CONFIG1_REG] = 0x7c,
};

static const int a72_cpubiuctrl_regs[] = {
	[CPU_CREDIT_REG] = 0x18,
	[CPU_MCP_FLOW_REG] = 0x1c,
	[CPU_WRITEBACK_CTRL_REG] = 0x20,
	[RAC_CONFIG0_REG] = 0x08,
	[RAC_CONFIG1_REG] = 0x0c,
};

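/*
 * Enable or disable MCP write pairing according to the "brcm,write-pairing"
 * DT property cached in mcp_wr_pairing_en.
 */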
static int __init mcp_write_pairing_set(void)
{
	u32 creds = 0;

	if (!cpubiuctrl_base)
		return -1;

	creds = cbc_readl(CPU_CREDIT_REG);
	if (mcp_wr_pairing_en) {
		pr_info("MCP: Enabling write pairing\n");
		cbc_writel(creds | CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK,
			   CPU_CREDIT_REG);
	} else if (creds & CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK) {
		pr_info("MCP: Disabling write pairing\n");
		cbc_writel(creds & ~CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK,
			   CPU_CREDIT_REG);
	} else {
		pr_info("MCP: Write pairing already disabled\n");
	}

	return 0;
}

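/* Chip family IDs (as matched via BRCM_ID()) whose A72/B53 BIU receives the
 * MCP credit and write-back tuning applied in mcp_a72_b53_set().
 */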
static const u32 a72_b53_mach_compat[] = {
	0x7211,
	0x72113,
	0x72116,
	0x7216,
	0x72164,
	0x72165,
	0x7255,
	0x7260,
	0x7268,
	0x7271,
	0x7278,
};

/* The read-ahead cache present in the Brahma-B53 CPU is a special piece of
 * hardware after the integrated L2 cache of the B53 CPU complex whose purpose
 * is to prefetch instructions and/or data with a line size of either 64 bytes
 * or 256 bytes. The rationale is that the data-bus of the CPU interface is
 * optimized for 256-byte transactions, and enabling the read-ahead cache
 * provides a significant performance boost (typically twice the performance
 * for a memcpy benchmark application).
 *
 * The read-ahead cache is transparent for Virtual Address cache maintenance
 * operations: IC IVAU, DC IVAC, DC CVAC, DC CVAU and DC CIVAC.  So no special
 * handling is needed for the DMA API above and beyond what is included in the
 * arm64 implementation.
 *
 * In addition, since the Point of Unification is typically between L1 and L2
 * for the Brahma-B53 processor, no special read-ahead cache handling is needed
 * for the IC IALLU and IC IALLUIS cache maintenance operations.
 *
 * However, it is not possible to specify the cache level (L3) for the cache
 * maintenance instructions operating by set/way to operate on the read-ahead
 * cache.  The read-ahead cache will maintain coherency when inner cache lines
 * are cleaned by set/way, but if it is necessary to invalidate inner cache
 * lines by set/way to maintain coherency with system masters operating on
 * shared memory that does not have hardware support for coherency, then it
 * will also be necessary to explicitly invalidate the read-ahead cache.
 */
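/*
 * Enable instruction and data read-ahead for every possible CPU.  On
 * Cortex-A72 parts the per-CPU DPREF_LINE_2 bits in RAC_CONFIG1_REG are set
 * as well, which appears to select the larger 256-byte prefetch line.
 */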
static void __init a72_b53_rac_enable_all(struct device_node *np)
{
	unsigned int cpu;
	u32 enable = 0, pref_dist, shift;

	if (IS_ENABLED(CONFIG_CACHE_B15_RAC))
		return;

	if (WARN(num_possible_cpus() > 4, "RAC only supports 4 CPUs\n"))
		return;

	pref_dist = cbc_readl(RAC_CONFIG1_REG);
	for_each_possible_cpu(cpu) {
		shift = cpu * RAC_CPU_SHIFT + RACPREFDATA_SHIFT;
		enable |= RAC_DATA_INST_EN_MASK << (cpu * RAC_CPU_SHIFT);
		if (cpubiuctrl_regs == a72_cpubiuctrl_regs) {
			enable &= ~(RACENPREF_MASK << shift);
			enable |= 3 << shift;
			pref_dist |= 1 << (cpu + DPREF_LINE_2_SHIFT);
		}
	}

	cbc_writel(enable, RAC_CONFIG0_REG);
	cbc_writel(pref_dist, RAC_CONFIG1_REG);

	pr_info("%pOF: Broadcom %s read-ahead cache\n",
		np, cpubiuctrl_regs == a72_cpubiuctrl_regs ?
		"Cortex-A72" : "Brahma-B53");
}

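/*
 * Apply the MCP interface tuning (read/write credits, read buffer credits
 * and write-back throttling) on chips listed in a72_b53_mach_compat.
 */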
static void __init mcp_a72_b53_set(void)
{
	unsigned int i;
	u32 reg;

	reg = brcmstb_get_family_id();

	for (i = 0; i < ARRAY_SIZE(a72_b53_mach_compat); i++) {
		if (BRCM_ID(reg) == a72_b53_mach_compat[i])
			break;
	}

	if (i == ARRAY_SIZE(a72_b53_mach_compat))
		return;

	/* Set all 3 MCP interfaces to 8 credits */
	reg = cbc_readl(CPU_CREDIT_REG);
	for (i = 0; i < 3; i++) {
		reg &= ~(CPU_CREDIT_REG_MCPx_WRITE_CRED_MASK <<
			 CPU_CREDIT_REG_MCPx_WRITE_CRED_SHIFT(i));
		reg &= ~(CPU_CREDIT_REG_MCPx_READ_CRED_MASK <<
			 CPU_CREDIT_REG_MCPx_READ_CRED_SHIFT(i));
		reg |= 8 << CPU_CREDIT_REG_MCPx_WRITE_CRED_SHIFT(i);
		reg |= 8 << CPU_CREDIT_REG_MCPx_READ_CRED_SHIFT(i);
	}
	cbc_writel(reg, CPU_CREDIT_REG);

	/* Max out the number of in-flight Jword reads on the MCP interface */
	reg = cbc_readl(CPU_MCP_FLOW_REG);
	for (i = 0; i < 3; i++)
		reg |= CPU_MCP_FLOW_REG_MCPx_RDBUFF_CRED_MASK <<
			CPU_MCP_FLOW_REG_MCPx_RDBUFF_CRED_SHIFT(i);
	cbc_writel(reg, CPU_MCP_FLOW_REG);

	/* Enable write-back throttling, set the timeout to 128 cycles and the
	 * threshold to 256 cycles
	 */
	reg = cbc_readl(CPU_WRITEBACK_CTRL_REG);
	reg |= CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_ENABLE;
	reg &= ~CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_THRESHOLD_MASK;
	reg &= ~(CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_TIMEOUT_MASK <<
		 CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_TIMEOUT_SHIFT);
	reg |= 8;
	reg |= 7 << CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_TIMEOUT_SHIFT;
	cbc_writel(reg, CPU_WRITEBACK_CTRL_REG);
}

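/*
 * Map the BIU control block and select the register offset table matching
 * the boot CPU, with a quirk for 7260 rev A0 which lacks the write-back
 * control register.
 */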
static int __init setup_hifcpubiuctrl_regs(struct device_node *np)
{
	struct device_node *cpu_dn;
	u32 family_id;
	int ret = 0;

	cpubiuctrl_base = of_iomap(np, 0);
	if (!cpubiuctrl_base) {
		pr_err("failed to remap BIU control base\n");
		ret = -ENOMEM;
		goto out;
	}

	mcp_wr_pairing_en = of_property_read_bool(np, "brcm,write-pairing");

	cpu_dn = of_get_cpu_node(0, NULL);
	if (!cpu_dn) {
		pr_err("failed to obtain CPU device node\n");
		ret = -ENODEV;
		goto out;
	}

	if (of_device_is_compatible(cpu_dn, "brcm,brahma-b15"))
		cpubiuctrl_regs = b15_cpubiuctrl_regs;
	else if (of_device_is_compatible(cpu_dn, "brcm,brahma-b53"))
		cpubiuctrl_regs = b53_cpubiuctrl_regs;
	else if (of_device_is_compatible(cpu_dn, "arm,cortex-a72"))
		cpubiuctrl_regs = a72_cpubiuctrl_regs;
	else {
		pr_err("unsupported CPU\n");
		ret = -EINVAL;
	}
	of_node_put(cpu_dn);

	family_id = brcmstb_get_family_id();
	if (BRCM_ID(family_id) == 0x7260 && BRCM_REV(family_id) == 0)
		cpubiuctrl_regs = b53_cpubiuctrl_no_wb_regs;
out:
	if (ret && cpubiuctrl_base) {
		iounmap(cpubiuctrl_base);
		cpubiuctrl_base = NULL;
	}
	return ret;
}

#ifdef CONFIG_PM_SLEEP
static u32 cpubiuctrl_reg_save[NUM_CPU_BIUCTRL_REGS];

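/*
 * Save the BIU control registers across system suspend and rewrite them on
 * resume, in case the block loses its state while the system is down.
 */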
static int brcmstb_cpu_credit_reg_suspend(void)
{
	unsigned int i;

	if (!cpubiuctrl_base)
		return 0;

	for (i = 0; i < NUM_CPU_BIUCTRL_REGS; i++)
		cpubiuctrl_reg_save[i] = cbc_readl(i);

	return 0;
}

static void brcmstb_cpu_credit_reg_resume(void)
{
	unsigned int i;

	if (!cpubiuctrl_base)
		return;

	for (i = 0; i < NUM_CPU_BIUCTRL_REGS; i++)
		cbc_writel(cpubiuctrl_reg_save[i], i);
}

static struct syscore_ops brcmstb_cpu_credit_syscore_ops = {
	.suspend = brcmstb_cpu_credit_reg_suspend,
	.resume = brcmstb_cpu_credit_reg_resume,
};
#endif


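/*
 * Early init: locate the brcm,brcmstb-cpu-biu-ctrl node, map its registers,
 * then apply the write pairing, read-ahead cache and MCP settings.
 */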
static int __init brcmstb_biuctrl_init(void)
{
	struct device_node *np;
	int ret;

	/* We might be running on a multi-platform kernel; don't make this a
	 * fatal error, just bail out early
	 */
	np = of_find_compatible_node(NULL, NULL, "brcm,brcmstb-cpu-biu-ctrl");
	if (!np)
		return 0;

	ret = setup_hifcpubiuctrl_regs(np);
	if (ret)
		goto out_put;

	ret = mcp_write_pairing_set();
	if (ret) {
		pr_err("MCP: Unable to disable write pairing!\n");
		goto out_put;
	}

	a72_b53_rac_enable_all(np);
	mcp_a72_b53_set();
#ifdef CONFIG_PM_SLEEP
	register_syscore_ops(&brcmstb_cpu_credit_syscore_ops);
#endif
	ret = 0;
out_put:
	of_node_put(np);
	return ret;
}
early_initcall(brcmstb_biuctrl_init);