// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/of.h>
#include <asm/acpi.h>
#include <asm/cpufeature.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/sbi.h>
#include <asm/smp.h>
#include <asm/pgtable.h>

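/*
 * On RISC-V the physical ID used by the generic CPU enumeration code is the
 * hart ID, so a logical CPU matches @phys_id when its mapped hart ID is equal.
 */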
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpuid_to_hartid_map(cpu);
}

/*
 * Returns the hart ID of the given device tree node, or -ENODEV if the node
 * isn't an enabled and valid RISC-V hart node.
 */
int riscv_of_processor_hartid(struct device_node *node, unsigned long *hart)
{
	int cpu;

	*hart = (unsigned long)of_get_cpu_hwid(node, 0);
	if (*hart == ~0UL) {
		pr_warn("Found CPU without hart ID\n");
		return -ENODEV;
	}

	cpu = riscv_hartid_to_cpuid(*hart);
	if (cpu < 0)
		return cpu;

	if (!cpu_possible(cpu))
		return -ENODEV;

	return 0;
}

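/*
 * Early boot variant of the above: validates a CPU device tree node before
 * the cpuid<->hartid map has been populated.  The node must be "riscv"
 * compatible, available, carry a hart ID, and advertise a base ISA the kernel
 * can run on, either via the "riscv,isa-base"/"riscv,isa-extensions"
 * properties or, when the fallback is permitted, the legacy "riscv,isa"
 * string.
 */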
int __init riscv_early_of_processor_hartid(struct device_node *node, unsigned long *hart)
{
	const char *isa;

	if (!of_device_is_compatible(node, "riscv")) {
		pr_warn("Found incompatible CPU\n");
		return -ENODEV;
	}

	*hart = (unsigned long)of_get_cpu_hwid(node, 0);
	if (*hart == ~0UL) {
		pr_warn("Found CPU without hart ID\n");
		return -ENODEV;
	}

	if (!of_device_is_available(node)) {
		pr_info("CPU with hartid=%lu is not available\n", *hart);
		return -ENODEV;
	}

	if (of_property_read_string(node, "riscv,isa-base", &isa))
		goto old_interface;

	if (IS_ENABLED(CONFIG_32BIT) && strncasecmp(isa, "rv32i", 5)) {
		pr_warn("CPU with hartid=%lu does not support rv32i\n", *hart);
		return -ENODEV;
	}

	if (IS_ENABLED(CONFIG_64BIT) && strncasecmp(isa, "rv64i", 5)) {
		pr_warn("CPU with hartid=%lu does not support rv64i\n", *hart);
		return -ENODEV;
	}

	if (!of_property_present(node, "riscv,isa-extensions"))
		return -ENODEV;

	if (of_property_match_string(node, "riscv,isa-extensions", "i") < 0 ||
	    of_property_match_string(node, "riscv,isa-extensions", "m") < 0 ||
	    of_property_match_string(node, "riscv,isa-extensions", "a") < 0) {
		pr_warn("CPU with hartid=%lu does not support ima\n", *hart);
		return -ENODEV;
	}

	return 0;

old_interface:
	if (!riscv_isa_fallback) {
		pr_warn("CPU with hartid=%lu is invalid: this kernel does not parse \"riscv,isa\"\n",
			*hart);
		return -ENODEV;
	}

	if (of_property_read_string(node, "riscv,isa", &isa)) {
		pr_warn("CPU with hartid=%lu has no \"riscv,isa-base\" or \"riscv,isa\" property\n",
			*hart);
		return -ENODEV;
	}

	if (IS_ENABLED(CONFIG_32BIT) && strncasecmp(isa, "rv32ima", 7)) {
		pr_warn("CPU with hartid=%lu does not support rv32ima\n", *hart);
		return -ENODEV;
	}

	if (IS_ENABLED(CONFIG_64BIT) && strncasecmp(isa, "rv64ima", 7)) {
		pr_warn("CPU with hartid=%lu does not support rv64ima\n", *hart);
		return -ENODEV;
	}

	return 0;
}

/*
 * Find the hart ID of the CPU DT node under which the given DT node falls.
 *
 * To achieve this, we walk up the DT tree until we find an active
 * RISC-V core (HART) node and extract the hart ID from it.
 */
int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
{
	for (; node; node = node->parent) {
		if (of_device_is_compatible(node, "riscv")) {
			*hartid = (unsigned long)of_get_cpu_hwid(node, 0);
			if (*hartid == ~0UL) {
				pr_warn("Found CPU without hart ID\n");
				return -ENODEV;
			}
			return 0;
		}
	}

	return -ENODEV;
}

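/*
 * Read the calling CPU's marchid (via SBI where available, directly from the
 * CSR when running in M-mode, or 0 otherwise) and cache it in the per-CPU
 * riscv_cpuinfo.
 */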
unsigned long __init riscv_get_marchid(void)
{
	struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);

#if IS_ENABLED(CONFIG_RISCV_SBI)
	ci->marchid = sbi_spec_is_0_1() ? 0 : sbi_get_marchid();
#elif IS_ENABLED(CONFIG_RISCV_M_MODE)
	ci->marchid = csr_read(CSR_MARCHID);
#else
	ci->marchid = 0;
#endif
	return ci->marchid;
}

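/* As above, but for the vendor ID. */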
unsigned long __init riscv_get_mvendorid(void)
{
	struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);

#if IS_ENABLED(CONFIG_RISCV_SBI)
	ci->mvendorid = sbi_spec_is_0_1() ? 0 : sbi_get_mvendorid();
#elif IS_ENABLED(CONFIG_RISCV_M_MODE)
	ci->mvendorid = csr_read(CSR_MVENDORID);
#else
	ci->mvendorid = 0;
#endif
	return ci->mvendorid;
}

DEFINE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);

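/*
 * Accessors for the cached machine ID registers; the values are populated by
 * riscv_get_marchid()/riscv_get_mvendorid() and riscv_cpuinfo_starting().
 */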
unsigned long riscv_cached_mvendorid(unsigned int cpu_id)
{
	struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);

	return ci->mvendorid;
}
EXPORT_SYMBOL(riscv_cached_mvendorid);

unsigned long riscv_cached_marchid(unsigned int cpu_id)
{
	struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);

	return ci->marchid;
}
EXPORT_SYMBOL(riscv_cached_marchid);

unsigned long riscv_cached_mimpid(unsigned int cpu_id)
{
	struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);

	return ci->mimpid;
}
EXPORT_SYMBOL(riscv_cached_mimpid);

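/*
 * CPU hotplug callback: cache mvendorid, marchid and mimpid for a CPU as it
 * comes online, reusing any values already read during early boot.
 */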
static int riscv_cpuinfo_starting(unsigned int cpu)
{
	struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);

#if IS_ENABLED(CONFIG_RISCV_SBI)
	if (!ci->mvendorid)
		ci->mvendorid = sbi_spec_is_0_1() ? 0 : sbi_get_mvendorid();
	if (!ci->marchid)
		ci->marchid = sbi_spec_is_0_1() ? 0 : sbi_get_marchid();
	ci->mimpid = sbi_spec_is_0_1() ? 0 : sbi_get_mimpid();
#elif IS_ENABLED(CONFIG_RISCV_M_MODE)
	if (!ci->mvendorid)
		ci->mvendorid = csr_read(CSR_MVENDORID);
	if (!ci->marchid)
		ci->marchid = csr_read(CSR_MARCHID);
	ci->mimpid = csr_read(CSR_MIMPID);
#else
	ci->mvendorid = 0;
	ci->marchid = 0;
	ci->mimpid = 0;
#endif

	return 0;
}

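/*
 * Register the hotplug callback above; it is invoked on each already-online
 * CPU at setup time and on every CPU brought online afterwards.
 */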
static int __init riscv_cpuinfo_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "riscv/cpuinfo:starting",
				riscv_cpuinfo_starting, NULL);
	if (ret < 0) {
		pr_err("cpuinfo: failed to register hotplug callbacks.\n");
		return ret;
	}

	return 0;
}
arch_initcall(riscv_cpuinfo_init);

#ifdef CONFIG_PROC_FS

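/*
 * Emit the "isa" line of /proc/cpuinfo: the base ISA ("rv32"/"rv64") followed
 * by every detected extension, with multi-letter extensions separated by
 * underscores.
 */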
static void print_isa(struct seq_file *f)
{
	seq_puts(f, "isa\t\t: ");

	if (IS_ENABLED(CONFIG_32BIT))
		seq_write(f, "rv32", 4);
	else
		seq_write(f, "rv64", 4);

	for (int i = 0; i < riscv_isa_ext_count; i++) {
		if (!__riscv_isa_extension_available(NULL, riscv_isa_ext[i].id))
			continue;

		/* Only multi-letter extensions are split by underscores */
		if (strnlen(riscv_isa_ext[i].name, 2) != 1)
			seq_puts(f, "_");

		seq_printf(f, "%s", riscv_isa_ext[i].name);
	}

	seq_puts(f, "\n");
}

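/* Emit the "mmu" line: the paging mode in use, or "none" on !CONFIG_MMU. */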
static void print_mmu(struct seq_file *f)
{
	const char *sv_type;

#ifdef CONFIG_MMU
#if defined(CONFIG_32BIT)
	sv_type = "sv32";
#elif defined(CONFIG_64BIT)
	if (pgtable_l5_enabled)
		sv_type = "sv57";
	else if (pgtable_l4_enabled)
		sv_type = "sv48";
	else
		sv_type = "sv39";
#endif
#else
	sv_type = "none";
#endif /* CONFIG_MMU */
	seq_printf(f, "mmu\t\t: %s\n", sv_type);
}

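/*
 * seq_file iterator over the online CPUs.  The iterator value is cpu + 1 so
 * that CPU 0 is not mistaken for the NULL end-of-sequence marker.
 */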
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == nr_cpu_ids)
		return NULL;

	*pos = cpumask_next(*pos - 1, cpu_online_mask);
	if ((*pos) < nr_cpu_ids)
		return (void *)(uintptr_t)(1 + *pos);
	return NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

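/*
 * Print one CPU's /proc/cpuinfo record: logical CPU number, hart ID, ISA,
 * MMU mode, an optional "uarch" line taken from the DT compatible string
 * (when booted with a device tree), and the cached machine ID registers.
 */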
static int c_show(struct seq_file *m, void *v)
{
	unsigned long cpu_id = (unsigned long)v - 1;
	struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);
	struct device_node *node;
	const char *compat;

	seq_printf(m, "processor\t: %lu\n", cpu_id);
	seq_printf(m, "hart\t\t: %lu\n", cpuid_to_hartid_map(cpu_id));
	print_isa(m);
	print_mmu(m);

	if (acpi_disabled) {
		node = of_get_cpu_node(cpu_id, NULL);

		if (!of_property_read_string(node, "compatible", &compat) &&
		    strcmp(compat, "riscv"))
			seq_printf(m, "uarch\t\t: %s\n", compat);

		of_node_put(node);
	}

	seq_printf(m, "mvendorid\t: 0x%lx\n", ci->mvendorid);
	seq_printf(m, "marchid\t\t: 0x%lx\n", ci->marchid);
	seq_printf(m, "mimpid\t\t: 0x%lx\n", ci->mimpid);
	seq_puts(m, "\n");

	return 0;
}

const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = c_show
};

#endif /* CONFIG_PROC_FS */