// SPDX-License-Identifier: GPL-2.0
/*
 * non-coherent cache functions for Andes AX45MP
 *
 * Copyright (C) 2023 Renesas Electronics Corp.
 */

#include <linux/cacheflush.h>
#include <linux/cacheinfo.h>
#include <linux/dma-direction.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>

#include <asm/dma-noncoherent.h>

/* L2 cache registers */
#define AX45MP_L2C_REG_CTL_OFFSET		0x8

#define AX45MP_L2C_REG_C0_CMD_OFFSET		0x40
#define AX45MP_L2C_REG_C0_ACC_OFFSET		0x48
#define AX45MP_L2C_REG_STATUS_OFFSET		0x80

/* D-cache operation */
#define AX45MP_CCTL_L1D_VA_INVAL		0 /* Invalidate an L1 cache entry */
#define AX45MP_CCTL_L1D_VA_WB			1 /* Write-back an L1 cache entry */

/* L2 CCTL status */
#define AX45MP_CCTL_L2_STATUS_IDLE		0

/* L2 CCTL status cores mask */
#define AX45MP_CCTL_L2_STATUS_C0_MASK		0xf

/* L2 cache operation */
#define AX45MP_CCTL_L2_PA_INVAL			0x8 /* Invalidate an L2 cache entry */
#define AX45MP_CCTL_L2_PA_WB			0x9 /* Write-back an L2 cache entry */

#define AX45MP_L2C_REG_PER_CORE_OFFSET		0x10
#define AX45MP_CCTL_L2_STATUS_PER_CORE_OFFSET	4

#define AX45MP_L2C_REG_CN_CMD_OFFSET(n)	\
	(AX45MP_L2C_REG_C0_CMD_OFFSET + ((n) * AX45MP_L2C_REG_PER_CORE_OFFSET))
#define AX45MP_L2C_REG_CN_ACC_OFFSET(n)	\
	(AX45MP_L2C_REG_C0_ACC_OFFSET + ((n) * AX45MP_L2C_REG_PER_CORE_OFFSET))
#define AX45MP_CCTL_L2_STATUS_CN_MASK(n)	\
	(AX45MP_CCTL_L2_STATUS_C0_MASK << ((n) * AX45MP_CCTL_L2_STATUS_PER_CORE_OFFSET))
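
/*
 * Worked example (illustration only, derived from the offsets above): for
 * core 1 the per-core macros evaluate to a CMD register at 0x40 + 0x10 =
 * 0x50, an ACC register at 0x48 + 0x10 = 0x58 and a status mask of
 * 0xf << 4 = 0xf0, i.e. each core owns a 16-byte register stride and a
 * 4-bit field in the shared status register.
 */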

#define AX45MP_CCTL_REG_UCCTLBEGINADDR_NUM	0x80b
#define AX45MP_CCTL_REG_UCCTLCOMMAND_NUM	0x80c
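/*
 * The two CSR numbers above are the Andes-specific CCTL CSRs
 * (ucctlbeginaddr and ucctlcommand) through which the per-VA L1 D-cache
 * operations below are issued on the local hart.
 */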

#define AX45MP_CACHE_LINE_SIZE			64

struct ax45mp_priv {
	void __iomem *l2c_base;
	u32 ax45mp_cache_line_size;
};

static struct ax45mp_priv ax45mp_priv;

/* L2 Cache operations */
static inline uint32_t ax45mp_cpu_l2c_get_cctl_status(void)
{
	return readl(ax45mp_priv.l2c_base + AX45MP_L2C_REG_STATUS_OFFSET);
}

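/*
 * Walk the virtual address range one cache line at a time: each iteration
 * issues the requested L1 operation on the VA through the CCTL CSRs, then
 * the matching L2 operation on the corresponding PA through this core's
 * L2C command register, and spins until the core's field in the L2 CCTL
 * status register reads idle again.
 */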
static void ax45mp_cpu_cache_operation(unsigned long start, unsigned long end,
				       unsigned int l1_op, unsigned int l2_op)
{
	unsigned long line_size = ax45mp_priv.ax45mp_cache_line_size;
	void __iomem *base = ax45mp_priv.l2c_base;
	int mhartid = smp_processor_id();
	unsigned long pa;

	while (end > start) {
		csr_write(AX45MP_CCTL_REG_UCCTLBEGINADDR_NUM, start);
		csr_write(AX45MP_CCTL_REG_UCCTLCOMMAND_NUM, l1_op);

		pa = virt_to_phys((void *)start);
		writel(pa, base + AX45MP_L2C_REG_CN_ACC_OFFSET(mhartid));
		writel(l2_op, base + AX45MP_L2C_REG_CN_CMD_OFFSET(mhartid));
		while ((ax45mp_cpu_l2c_get_cctl_status() &
			AX45MP_CCTL_L2_STATUS_CN_MASK(mhartid)) !=
			AX45MP_CCTL_L2_STATUS_IDLE)
			;

		start += line_size;
	}
}

/* Write-back L1 and L2 cache entry */
static inline void ax45mp_cpu_dcache_wb_range(unsigned long start, unsigned long end)
{
	ax45mp_cpu_cache_operation(start, end, AX45MP_CCTL_L1D_VA_WB,
				   AX45MP_CCTL_L2_PA_WB);
}

/* Invalidate the L1 and L2 cache entry */
static inline void ax45mp_cpu_dcache_inval_range(unsigned long start, unsigned long end)
{
	ax45mp_cpu_cache_operation(start, end, AX45MP_CCTL_L1D_VA_INVAL,
				   AX45MP_CCTL_L2_PA_INVAL);
}

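/*
 * Invalidate both cache levels for the lines covering [paddr, paddr + size).
 * The range is first aligned out to whole cache lines and the walk runs with
 * local interrupts disabled.
 */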
static void ax45mp_dma_cache_inv(phys_addr_t paddr, size_t size)
{
	unsigned long start = (unsigned long)phys_to_virt(paddr);
	unsigned long end = start + size;
	unsigned long line_size;
	unsigned long flags;

	if (unlikely(start == end))
		return;

	line_size = ax45mp_priv.ax45mp_cache_line_size;

	start = start & (~(line_size - 1));
	end = ((end + line_size - 1) & (~(line_size - 1)));

	local_irq_save(flags);

	ax45mp_cpu_dcache_inval_range(start, end);

	local_irq_restore(flags);
}

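/*
 * Write back both cache levels for [paddr, paddr + size). Only the start is
 * aligned down here; the line-by-line walk in ax45mp_cpu_cache_operation
 * still covers a trailing partial line because it keeps stepping until it
 * passes 'end'.
 */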
static void ax45mp_dma_cache_wback(phys_addr_t paddr, size_t size)
{
	unsigned long start = (unsigned long)phys_to_virt(paddr);
	unsigned long end = start + size;
	unsigned long line_size;
	unsigned long flags;

	line_size = ax45mp_priv.ax45mp_cache_line_size;
	start = start & (~(line_size - 1));
	local_irq_save(flags);
	ax45mp_cpu_dcache_wb_range(start, end);
	local_irq_restore(flags);
}

static void ax45mp_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
{
	ax45mp_dma_cache_wback(paddr, size);
	ax45mp_dma_cache_inv(paddr, size);
}

static int ax45mp_get_l2_line_size(struct device_node *np)
{
	int ret;

	ret = of_property_read_u32(np, "cache-line-size", &ax45mp_priv.ax45mp_cache_line_size);
	if (ret) {
		pr_err("Failed to get cache-line-size\n");
		return ret;
	}

	if (ax45mp_priv.ax45mp_cache_line_size != AX45MP_CACHE_LINE_SIZE) {
		pr_err("Expected cache-line-size to be 64 bytes (found:%u)\n",
		       ax45mp_priv.ax45mp_cache_line_size);
		return -EINVAL;
	}

	return 0;
}

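/*
 * Non-standard CMO callbacks registered with the RISC-V non-coherent DMA
 * support: wback cleans dirty lines out to memory, inv discards (possibly
 * stale) lines, and wback_inv does both; the DMA mapping code picks the
 * appropriate callback for each sync direction.
 */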
static const struct riscv_nonstd_cache_ops ax45mp_cmo_ops __initdata = {
	.wback = &ax45mp_dma_cache_wback,
	.inv = &ax45mp_dma_cache_inv,
	.wback_inv = &ax45mp_dma_cache_wback_inv,
};

static const struct of_device_id ax45mp_cache_ids[] = {
	{ .compatible = "andestech,ax45mp-cache" },
	{ /* sentinel */ }
};

static int __init ax45mp_cache_init(void)
{
	struct device_node *np;
	struct resource res;
	int ret;

	np = of_find_matching_node(NULL, ax45mp_cache_ids);
	if (!of_device_is_available(np))
		return -ENODEV;

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return ret;

	/*
	 * If the IOCP is present on the Andes AX45MP core,
	 * riscv_cbom_block_size is guaranteed to be 0, so it can be relied
	 * upon here. When riscv_cbom_block_size is 0 there is no need to
	 * handle the CMOs in software, so simply return success; only when
	 * it is set does the probe path continue below.
	 */
	if (!riscv_cbom_block_size)
		return 0;

	ax45mp_priv.l2c_base = ioremap(res.start, resource_size(&res));
	if (!ax45mp_priv.l2c_base)
		return -ENOMEM;

	ret = ax45mp_get_l2_line_size(np);
	if (ret) {
		iounmap(ax45mp_priv.l2c_base);
		return ret;
	}

	riscv_noncoherent_register_cache_ops(&ax45mp_cmo_ops);

	return 0;
}
early_initcall(ax45mp_cache_init);
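
/*
 * Illustrative (assumed, not taken from any particular SoC) device tree node
 * this driver binds against; only the compatible string is dictated by the
 * match table above, the remaining values are placeholders:
 *
 *	cache-controller@13400000 {
 *		compatible = "andestech,ax45mp-cache", "cache";
 *		reg = <0x0 0x13400000 0x0 0x100000>;
 *		cache-line-size = <64>;
 *		cache-level = <2>;
 *		cache-unified;
 *	};
 */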