// SPDX-License-Identifier: GPL-2.0
/*
 * non-coherent cache functions for Andes AX45MP
 *
 * Copyright (C) 2023 Renesas Electronics Corp.
 */

#include <linux/cacheflush.h>
#include <linux/cacheinfo.h>
#include <linux/dma-direction.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>

#include <asm/dma-noncoherent.h>

/* L2 cache registers */
#define AX45MP_L2C_REG_CTL_OFFSET		0x8

#define AX45MP_L2C_REG_C0_CMD_OFFSET		0x40
#define AX45MP_L2C_REG_C0_ACC_OFFSET		0x48
#define AX45MP_L2C_REG_STATUS_OFFSET		0x80

/* D-cache operation */
#define AX45MP_CCTL_L1D_VA_INVAL		0 /* Invalidate an L1 cache entry */
#define AX45MP_CCTL_L1D_VA_WB			1 /* Write-back an L1 cache entry */

/* L2 CCTL status */
#define AX45MP_CCTL_L2_STATUS_IDLE		0

/* L2 CCTL status cores mask */
#define AX45MP_CCTL_L2_STATUS_C0_MASK		0xf

/* L2 cache operation */
#define AX45MP_CCTL_L2_PA_INVAL			0x8 /* Invalidate an L2 cache entry */
#define AX45MP_CCTL_L2_PA_WB			0x9 /* Write-back an L2 cache entry */

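/* Stride between per-hart register banks and width of each hart's status field */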
#define AX45MP_L2C_REG_PER_CORE_OFFSET		0x10
#define AX45MP_CCTL_L2_STATUS_PER_CORE_OFFSET	4

#define AX45MP_L2C_REG_CN_CMD_OFFSET(n)	\
	(AX45MP_L2C_REG_C0_CMD_OFFSET + ((n) * AX45MP_L2C_REG_PER_CORE_OFFSET))
#define AX45MP_L2C_REG_CN_ACC_OFFSET(n)	\
	(AX45MP_L2C_REG_C0_ACC_OFFSET + ((n) * AX45MP_L2C_REG_PER_CORE_OFFSET))
#define AX45MP_CCTL_L2_STATUS_CN_MASK(n)	\
	(AX45MP_CCTL_L2_STATUS_C0_MASK << ((n) * AX45MP_CCTL_L2_STATUS_PER_CORE_OFFSET))

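/* CCTL CSR numbers (ucctlbeginaddr/ucctlcommand) used for the L1 D-cache ops */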
#define AX45MP_CCTL_REG_UCCTLBEGINADDR_NUM	0x80b
#define AX45MP_CCTL_REG_UCCTLCOMMAND_NUM	0x80c

#define AX45MP_CACHE_LINE_SIZE			64

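/* Driver-private state: mapped L2 controller registers and the probed line size */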
struct ax45mp_priv {
	void __iomem *l2c_base;
	u32 ax45mp_cache_line_size;
};

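/* A single instance suffices: all harts share one L2 cache controller */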
static struct ax45mp_priv ax45mp_priv;

/* L2 Cache operations */
static inline uint32_t ax45mp_cpu_l2c_get_cctl_status(void)
{
	return readl(ax45mp_priv.l2c_base + AX45MP_L2C_REG_STATUS_OFFSET);
}

static void ax45mp_cpu_cache_operation(unsigned long start, unsigned long end,
				       unsigned int l1_op, unsigned int l2_op)
{
	unsigned long line_size = ax45mp_priv.ax45mp_cache_line_size;
	void __iomem *base = ax45mp_priv.l2c_base;
	int mhartid = smp_processor_id();
	unsigned long pa;

	while (end > start) {
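		/* Operate on the L1 D-cache line holding this VA via the CCTL CSRs */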
		csr_write(AX45MP_CCTL_REG_UCCTLBEGINADDR_NUM, start);
		csr_write(AX45MP_CCTL_REG_UCCTLCOMMAND_NUM, l1_op);

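		/* Issue the matching L2 operation by PA through this hart's registers */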
		pa = virt_to_phys((void *)start);
		writel(pa, base + AX45MP_L2C_REG_CN_ACC_OFFSET(mhartid));
		writel(l2_op, base + AX45MP_L2C_REG_CN_CMD_OFFSET(mhartid));
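		/* Busy-wait until the L2 CCTL command issued by this hart completes */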
		while ((ax45mp_cpu_l2c_get_cctl_status() &
			AX45MP_CCTL_L2_STATUS_CN_MASK(mhartid)) !=
			AX45MP_CCTL_L2_STATUS_IDLE)
			;

		start += line_size;
	}
}

/* Write-back L1 and L2 cache entry */
static inline void ax45mp_cpu_dcache_wb_range(unsigned long start, unsigned long end)
{
	ax45mp_cpu_cache_operation(start, end, AX45MP_CCTL_L1D_VA_WB,
				   AX45MP_CCTL_L2_PA_WB);
}

/* Invalidate the L1 and L2 cache entry */
static inline void ax45mp_cpu_dcache_inval_range(unsigned long start, unsigned long end)
{
	ax45mp_cpu_cache_operation(start, end, AX45MP_CCTL_L1D_VA_INVAL,
				   AX45MP_CCTL_L2_PA_INVAL);
}

static void ax45mp_dma_cache_inv(phys_addr_t paddr, size_t size)
{
	unsigned long start = (unsigned long)phys_to_virt(paddr);
	unsigned long end = start + size;
	unsigned long line_size;
	unsigned long flags;

	if (unlikely(start == end))
		return;

	line_size = ax45mp_priv.ax45mp_cache_line_size;

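	/* Round the range out to full cache lines */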
	start = start & (~(line_size - 1));
	end = ((end + line_size - 1) & (~(line_size - 1)));

	local_irq_save(flags);

	ax45mp_cpu_dcache_inval_range(start, end);

	local_irq_restore(flags);
}

static void ax45mp_dma_cache_wback(phys_addr_t paddr, size_t size)
{
	unsigned long start = (unsigned long)phys_to_virt(paddr);
	unsigned long end = start + size;
	unsigned long line_size;
	unsigned long flags;

	if (unlikely(start == end))
		return;

	line_size = ax45mp_priv.ax45mp_cache_line_size;
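	/* Round the range out to full cache lines before writing back */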
	start = start & (~(line_size - 1));
	end = ((end + line_size - 1) & (~(line_size - 1)));
	local_irq_save(flags);
	ax45mp_cpu_dcache_wb_range(start, end);
	local_irq_restore(flags);
}

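/* Write back then invalidate, e.g. for bidirectional DMA buffers */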
static void ax45mp_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
{
	ax45mp_dma_cache_wback(paddr, size);
	ax45mp_dma_cache_inv(paddr, size);
}

static int ax45mp_get_l2_line_size(struct device_node *np)
{
	int ret;

	ret = of_property_read_u32(np, "cache-line-size", &ax45mp_priv.ax45mp_cache_line_size);
	if (ret) {
		pr_err("Failed to get cache-line-size\n");
		return ret;
	}

	if (ax45mp_priv.ax45mp_cache_line_size != AX45MP_CACHE_LINE_SIZE) {
		pr_err("Expected cache-line-size to be 64 bytes (found:%u)\n",
		       ax45mp_priv.ax45mp_cache_line_size);
		return -EINVAL;
	}

	return 0;
}

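/* Callbacks hooked into the generic RISC-V non-coherent DMA support */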
static const struct riscv_nonstd_cache_ops ax45mp_cmo_ops __initdata = {
	.wback = &ax45mp_dma_cache_wback,
	.inv = &ax45mp_dma_cache_inv,
	.wback_inv = &ax45mp_dma_cache_wback_inv,
};

static const struct of_device_id ax45mp_cache_ids[] = {
	{ .compatible = "andestech,ax45mp-cache" },
	{ /* sentinel */ }
};

static int __init ax45mp_cache_init(void)
{
	struct device_node *np;
	struct resource res;
	int ret;

	np = of_find_matching_node(NULL, ax45mp_cache_ids);
	if (!of_device_is_available(np))
		return -ENODEV;

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return ret;

	/*
	 * If IOCP is present on the Andes AX45MP core, riscv_cbom_block_size
	 * is guaranteed to be 0, so we can rely on it. When
	 * riscv_cbom_block_size is 0 there is no need to handle CMOs in
	 * software, so just return success here; only if it is set do we
	 * continue further down the probe path.
	 */
	if (!riscv_cbom_block_size)
		return 0;

	ax45mp_priv.l2c_base = ioremap(res.start, resource_size(&res));
	if (!ax45mp_priv.l2c_base)
		return -ENOMEM;

	ret = ax45mp_get_l2_line_size(np);
	if (ret) {
		iounmap(ax45mp_priv.l2c_base);
		return ret;
	}

	riscv_noncoherent_register_cache_ops(&ax45mp_cmo_ops);

	return 0;
}
early_initcall(ax45mp_cache_init);