// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copied from arch/arm64/kernel/cpufeature.c
 *
 * Copyright (C) 2015 ARM Ltd.
 * Copyright (C) 2017 SiFive
 */

#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/of.h>
#include <asm/acpi.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwcap.h>
#include <asm/hwprobe.h>
#include <asm/patch.h>
#include <asm/processor.h>
#include <asm/sbi.h>
#include <asm/vector.h>

#include "copy-unaligned.h"

#define NUM_ALPHA_EXTS ('z' - 'a' + 1)

#define MISALIGNED_ACCESS_JIFFIES_LG2 1
#define MISALIGNED_BUFFER_SIZE 0x4000
#define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80)

unsigned long elf_hwcap __read_mostly;

/* Host ISA bitmap */
static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly;

/* Per-cpu ISA extensions. */
struct riscv_isainfo hart_isa[NR_CPUS];

/* Performance information */
DEFINE_PER_CPU(long, misaligned_access_speed);

/**
 * riscv_isa_extension_base() - Get base extension word
 *
 * @isa_bitmap: ISA bitmap to use
 * Return: base extension word as unsigned long value
 *
 * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used.
 */
unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap)
{
	if (!isa_bitmap)
		return riscv_isa[0];
	return isa_bitmap[0];
}
EXPORT_SYMBOL_GPL(riscv_isa_extension_base);
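
/*
 * Illustrative only (not part of the original file): each single-letter base
 * extension occupies bit ('x' - 'a') of this word, so a caller could sketch a
 * check for the compressed 'c' extension roughly as:
 *
 *	unsigned long base = riscv_isa_extension_base(NULL);
 *
 *	if (base & BIT('c' - 'a'))
 *		pr_info("C extension present on all harts\n");
 */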

/**
 * __riscv_isa_extension_available() - Check whether given extension
 * is available or not
 *
 * @isa_bitmap: ISA bitmap to use
 * @bit: bit position of the desired extension
 * Return: true or false
 *
 * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used.
 */
bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit)
{
	const unsigned long *bmap = (isa_bitmap) ? isa_bitmap : riscv_isa;

	if (bit >= RISCV_ISA_EXT_MAX)
		return false;

	return test_bit(bit, bmap) ? true : false;
}
EXPORT_SYMBOL_GPL(__riscv_isa_extension_available);
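
/*
 * Illustrative usage (not part of the original file): callers pass NULL to
 * test the host ISA bitmap together with an extension ID, e.g. roughly:
 *
 *	if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_ZICBOZ))
 *		pr_info("Zicboz is supported on all harts\n");
 */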

static bool riscv_isa_extension_check(int id)
{
	switch (id) {
	case RISCV_ISA_EXT_ZICBOM:
		if (!riscv_cbom_block_size) {
			pr_err("Zicbom detected in ISA string, disabling as no cbom-block-size found\n");
			return false;
		} else if (!is_power_of_2(riscv_cbom_block_size)) {
			pr_err("Zicbom disabled as cbom-block-size present, but is not a power-of-2\n");
			return false;
		}
		return true;
	case RISCV_ISA_EXT_ZICBOZ:
		if (!riscv_cboz_block_size) {
			pr_err("Zicboz detected in ISA string, disabling as no cboz-block-size found\n");
			return false;
		} else if (!is_power_of_2(riscv_cboz_block_size)) {
			pr_err("Zicboz disabled as cboz-block-size present, but is not a power-of-2\n");
			return false;
		}
		return true;
	}

	return true;
}

#define __RISCV_ISA_EXT_DATA(_name, _id) {	\
	.name = #_name,				\
	.property = #_name,			\
	.id = _id,				\
}

/*
 * The canonical order of ISA extension names in the ISA string is defined in
 * chapter 27 of the unprivileged specification.
 *
 * Ordinarily, for in-kernel data structures, this order is unimportant but
 * riscv_isa_ext defines the order of the ISA string in /proc/cpuinfo.
 *
 * The specification uses vague wording, such as should, when it comes to
 * ordering, so for our purposes the following rules apply:
 *
 * 1. All multi-letter extensions must be separated from other extensions by an
 *    underscore.
 *
 * 2. Additional standard extensions (starting with 'Z') must be sorted after
 *    single-letter extensions and before any higher-privileged extensions.
 *
 * 3. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they must be ordered first by
 *    category, then alphabetically within a category.
 *
 * 4. Standard supervisor-level extensions (starting with 'S') must be listed
 *    after standard unprivileged extensions. If multiple supervisor-level
 *    extensions are listed, they must be ordered alphabetically.
 *
 * 5. Standard machine-level extensions (starting with 'Zxm') must be listed
 *    after any lower-privileged, standard extensions. If multiple
 *    machine-level extensions are listed, they must be ordered
 *    alphabetically.
 *
 * 6. Non-standard extensions (starting with 'X') must be listed after all
 *    standard extensions. If multiple non-standard extensions are listed, they
 *    must be ordered alphabetically.
 *
 * An example string following the order is:
 *    rv64imadc_zifoo_zigoo_zafoo_sbar_scar_zxmbaz_xqux_xrux
 *
 * New entries to this struct should follow the ordering rules described above.
 */
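/*
 * Illustrative placement example (not part of the original file): under rule 3
 * above, a hypothetical "Zbx" entry would be added to the table below after
 * zbs (same 'b' category, alphabetical within the category), while a
 * hypothetical "Szzz" entry would go after svpbmt under rule 4.
 */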
const struct riscv_isa_ext_data riscv_isa_ext[] = {
	__RISCV_ISA_EXT_DATA(i, RISCV_ISA_EXT_i),
	__RISCV_ISA_EXT_DATA(m, RISCV_ISA_EXT_m),
	__RISCV_ISA_EXT_DATA(a, RISCV_ISA_EXT_a),
	__RISCV_ISA_EXT_DATA(f, RISCV_ISA_EXT_f),
	__RISCV_ISA_EXT_DATA(d, RISCV_ISA_EXT_d),
	__RISCV_ISA_EXT_DATA(q, RISCV_ISA_EXT_q),
	__RISCV_ISA_EXT_DATA(c, RISCV_ISA_EXT_c),
	__RISCV_ISA_EXT_DATA(b, RISCV_ISA_EXT_b),
	__RISCV_ISA_EXT_DATA(k, RISCV_ISA_EXT_k),
	__RISCV_ISA_EXT_DATA(j, RISCV_ISA_EXT_j),
	__RISCV_ISA_EXT_DATA(p, RISCV_ISA_EXT_p),
	__RISCV_ISA_EXT_DATA(v, RISCV_ISA_EXT_v),
	__RISCV_ISA_EXT_DATA(h, RISCV_ISA_EXT_h),
	__RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM),
	__RISCV_ISA_EXT_DATA(zicboz, RISCV_ISA_EXT_ZICBOZ),
	__RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR),
	__RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR),
	__RISCV_ISA_EXT_DATA(zifencei, RISCV_ISA_EXT_ZIFENCEI),
	__RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE),
	__RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM),
	__RISCV_ISA_EXT_DATA(zba, RISCV_ISA_EXT_ZBA),
	__RISCV_ISA_EXT_DATA(zbb, RISCV_ISA_EXT_ZBB),
	__RISCV_ISA_EXT_DATA(zbs, RISCV_ISA_EXT_ZBS),
	__RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA),
	__RISCV_ISA_EXT_DATA(ssaia, RISCV_ISA_EXT_SSAIA),
	__RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF),
	__RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC),
	__RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL),
	__RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT),
	__RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
};

const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext);

static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct riscv_isainfo *isainfo,
					  unsigned long *isa2hwcap, const char *isa)
{
	/*
	 * For all possible cpus, we have already validated in
	 * the boot process that they at least contain "rv" and
	 * whichever of "32"/"64" this kernel supports, and so this
	 * section can be skipped.
	 */
	isa += 4;

	while (*isa) {
		const char *ext = isa++;
		const char *ext_end = isa;
		bool ext_long = false, ext_err = false;

		switch (*ext) {
		case 's':
			/*
			 * Workaround for invalid single-letter 's' & 'u' (QEMU).
			 * No need to set the bit in riscv_isa as 's' & 'u' are
			 * not valid ISA extensions. It works until a multi-letter
			 * extension starting with "Su" appears.
			 */
			if (ext[-1] != '_' && ext[1] == 'u') {
				++isa;
				ext_err = true;
				break;
			}
			fallthrough;
		case 'S':
		case 'x':
		case 'X':
		case 'z':
		case 'Z':
			/*
			 * Before attempting to parse the extension itself, we find its end.
			 * As multi-letter extensions must be split from other multi-letter
			 * extensions with an "_", the end of a multi-letter extension will
			 * either be the null character or the "_" at the start of the next
			 * multi-letter extension.
			 *
			 * Next, as the extension's version is currently ignored, we
			 * eliminate that portion. This is done by parsing backwards from
			 * the end of the extension, removing any numbers. This may be a
			 * major or minor number however, so the process is repeated if a
			 * minor number was found.
			 *
			 * ext_end is intended to represent the first character *after* the
			 * name portion of an extension, but will be decremented to the last
			 * character itself while eliminating the extension's version number.
			 * A simple re-increment solves this problem.
			 */
			ext_long = true;
			for (; *isa && *isa != '_'; ++isa)
				if (unlikely(!isalnum(*isa)))
					ext_err = true;

			ext_end = isa;
			if (unlikely(ext_err))
				break;

			if (!isdigit(ext_end[-1]))
				break;

			while (isdigit(*--ext_end))
				;

			if (tolower(ext_end[0]) != 'p' || !isdigit(ext_end[-1])) {
				++ext_end;
				break;
			}

			while (isdigit(*--ext_end))
				;

			++ext_end;
			break;
		default:
			/*
			 * Things are a little easier for single-letter extensions, as they
			 * are parsed forwards.
			 *
			 * After checking that our starting position is valid, we need to
			 * ensure that, when isa was incremented at the start of the loop,
			 * it arrived at the start of the next extension.
			 *
			 * If we are already on a non-digit, there is nothing to do. Either
			 * we have a multi-letter extension's _, or the start of an
			 * extension.
			 *
			 * Otherwise we have found the current extension's major version
			 * number. Parse past it, and a subsequent p/minor version number
			 * if present. The `p` extension must not appear immediately after
			 * a number, so there is no fear of missing it.
			 */
			if (unlikely(!isalpha(*ext))) {
				ext_err = true;
				break;
			}

			if (!isdigit(*isa))
				break;

			while (isdigit(*++isa))
				;

			if (tolower(*isa) != 'p')
				break;

			if (!isdigit(*++isa)) {
				--isa;
				break;
			}

			while (isdigit(*++isa))
				;

			break;
		}

		/*
		 * The parser expects that at the start of an iteration isa points to the
		 * first character of the next extension. As we stop parsing an extension
		 * on meeting a non-alphanumeric character, an extra increment is needed
		 * where the succeeding extension is a multi-letter prefixed with an "_".
		 */
		if (*isa == '_')
			++isa;

#define SET_ISA_EXT_MAP(name, bit)					\
		do {							\
			if ((ext_end - ext == strlen(name)) &&		\
			    !strncasecmp(ext, name, strlen(name)) &&	\
			    riscv_isa_extension_check(bit))		\
				set_bit(bit, isainfo->isa);		\
		} while (false)						\

		if (unlikely(ext_err))
			continue;
		if (!ext_long) {
			int nr = tolower(*ext) - 'a';

			if (riscv_isa_extension_check(nr)) {
				*this_hwcap |= isa2hwcap[nr];
				set_bit(nr, isainfo->isa);
			}
		} else {
			for (int i = 0; i < riscv_isa_ext_count; i++)
				SET_ISA_EXT_MAP(riscv_isa_ext[i].name,
						riscv_isa_ext[i].id);
		}
#undef SET_ISA_EXT_MAP
	}
}
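
/*
 * Illustrative walkthrough (not part of the original file), assuming an input
 * string of "rv64imac_zicbom1p0_zicboz":
 *
 *   - "rv64" is skipped up front;
 *   - 'i', 'm', 'a' and 'c' are consumed as single-letter extensions, setting
 *     the matching isa2hwcap bits and bits in isainfo->isa;
 *   - "zicbom1p0" is consumed as a multi-letter extension; the trailing "1p0"
 *     version is stripped so only the name "zicbom" is matched against
 *     riscv_isa_ext[], subject to riscv_isa_extension_check();
 *   - each "_" separator is skipped, and "zicboz" is matched the same way.
 */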

static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
{
	struct device_node *node;
	const char *isa;
	int rc;
	struct acpi_table_header *rhct;
	acpi_status status;
	unsigned int cpu;
	u64 boot_vendorid;
	u64 boot_archid;

	if (!acpi_disabled) {
		status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
		if (ACPI_FAILURE(status))
			return;
	}

	boot_vendorid = riscv_get_mvendorid();
	boot_archid = riscv_get_marchid();

	for_each_possible_cpu(cpu) {
		struct riscv_isainfo *isainfo = &hart_isa[cpu];
		unsigned long this_hwcap = 0;

		if (acpi_disabled) {
			node = of_cpu_device_node_get(cpu);
			if (!node) {
				pr_warn("Unable to find cpu node\n");
				continue;
			}

			rc = of_property_read_string(node, "riscv,isa", &isa);
			of_node_put(node);
			if (rc) {
				pr_warn("Unable to find \"riscv,isa\" devicetree entry\n");
				continue;
			}
		} else {
			rc = acpi_get_riscv_isa(rhct, cpu, &isa);
			if (rc < 0) {
				pr_warn("Unable to get ISA for the hart - %d\n", cpu);
				continue;
			}
		}

		riscv_parse_isa_string(&this_hwcap, isainfo, isa2hwcap, isa);

		/*
		 * These ones were part of the base ISA when the port &
		 * dt-bindings were upstreamed, and so can be set
		 * unconditionally where `i` is in riscv,isa on DT systems.
		 */
		if (acpi_disabled) {
			set_bit(RISCV_ISA_EXT_ZICSR, isainfo->isa);
			set_bit(RISCV_ISA_EXT_ZIFENCEI, isainfo->isa);
			set_bit(RISCV_ISA_EXT_ZICNTR, isainfo->isa);
			set_bit(RISCV_ISA_EXT_ZIHPM, isainfo->isa);
		}

		/*
		 * "V" in ISA strings is ambiguous in practice: it should mean
		 * just the standard V-1.0, but vendors aren't well behaved.
		 * Many vendors with T-Head CPU cores which implement the 0.7.1
		 * version of the vector specification put "v" into their DTs.
		 * CPU cores with the ratified spec will contain non-zero
		 * marchid.
		 */
		if (acpi_disabled && boot_vendorid == THEAD_VENDOR_ID && boot_archid == 0x0) {
			this_hwcap &= ~isa2hwcap[RISCV_ISA_EXT_v];
			clear_bit(RISCV_ISA_EXT_v, isainfo->isa);
		}

		/*
		 * All "okay" harts should have the same ISA. Set HWCAP based on
		 * the common capabilities of every "okay" hart, in case they
		 * don't.
		 */
		if (elf_hwcap)
			elf_hwcap &= this_hwcap;
		else
			elf_hwcap = this_hwcap;

		if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
			bitmap_copy(riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
		else
			bitmap_and(riscv_isa, riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
	}

	if (!acpi_disabled && rhct)
		acpi_put_table((struct acpi_table_header *)rhct);
}

static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		unsigned long this_hwcap = 0;
		struct device_node *cpu_node;
		struct riscv_isainfo *isainfo = &hart_isa[cpu];

		cpu_node = of_cpu_device_node_get(cpu);
		if (!cpu_node) {
			pr_warn("Unable to find cpu node\n");
			continue;
		}

		if (!of_property_present(cpu_node, "riscv,isa-extensions")) {
			of_node_put(cpu_node);
			continue;
		}

		for (int i = 0; i < riscv_isa_ext_count; i++) {
			if (of_property_match_string(cpu_node, "riscv,isa-extensions",
						     riscv_isa_ext[i].property) < 0)
				continue;

			if (!riscv_isa_extension_check(riscv_isa_ext[i].id))
				continue;

			/* Only single-letter extensions get set in hwcap */
			if (strnlen(riscv_isa_ext[i].name, 2) == 1)
				this_hwcap |= isa2hwcap[riscv_isa_ext[i].id];

			set_bit(riscv_isa_ext[i].id, isainfo->isa);
		}

		of_node_put(cpu_node);

		/*
		 * All "okay" harts should have the same ISA. Set HWCAP based on
		 * the common capabilities of every "okay" hart, in case they
		 * don't.
		 */
		if (elf_hwcap)
			elf_hwcap &= this_hwcap;
		else
			elf_hwcap = this_hwcap;

		if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
			bitmap_copy(riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
		else
			bitmap_and(riscv_isa, riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
	}

	if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
		return -ENOENT;

	return 0;
}
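
/*
 * Illustrative device tree fragment (not part of the original file) for the
 * "riscv,isa-extensions" property consumed above; exact node contents vary by
 * platform and binding version:
 *
 *	cpu@0 {
 *		compatible = "riscv";
 *		riscv,isa-base = "rv64i";
 *		riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
 *				       "zicsr", "zifencei";
 *	};
 */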

#ifdef CONFIG_RISCV_ISA_FALLBACK
bool __initdata riscv_isa_fallback = true;
#else
bool __initdata riscv_isa_fallback;
static int __init riscv_isa_fallback_setup(char *__unused)
{
	riscv_isa_fallback = true;
	return 1;
}
early_param("riscv_isa_fallback", riscv_isa_fallback_setup);
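
/*
 * Usage note (not part of the original file): with CONFIG_RISCV_ISA_FALLBACK
 * disabled, passing "riscv_isa_fallback" on the kernel command line re-enables
 * the fallback to the deprecated "riscv,isa" property in riscv_fill_hwcap().
 */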
#endif

void __init riscv_fill_hwcap(void)
{
	char print_str[NUM_ALPHA_EXTS + 1];
	unsigned long isa2hwcap[26] = {0};
	int i, j;

	isa2hwcap['i' - 'a'] = COMPAT_HWCAP_ISA_I;
	isa2hwcap['m' - 'a'] = COMPAT_HWCAP_ISA_M;
	isa2hwcap['a' - 'a'] = COMPAT_HWCAP_ISA_A;
	isa2hwcap['f' - 'a'] = COMPAT_HWCAP_ISA_F;
	isa2hwcap['d' - 'a'] = COMPAT_HWCAP_ISA_D;
	isa2hwcap['c' - 'a'] = COMPAT_HWCAP_ISA_C;
	isa2hwcap['v' - 'a'] = COMPAT_HWCAP_ISA_V;

	if (!acpi_disabled) {
		riscv_fill_hwcap_from_isa_string(isa2hwcap);
	} else {
		int ret = riscv_fill_hwcap_from_ext_list(isa2hwcap);

		if (ret && riscv_isa_fallback) {
			pr_info("Falling back to deprecated \"riscv,isa\"\n");
			riscv_fill_hwcap_from_isa_string(isa2hwcap);
		}
	}

	/*
	 * We don't support systems with F but without D, so mask those out
	 * here.
	 */
	if ((elf_hwcap & COMPAT_HWCAP_ISA_F) && !(elf_hwcap & COMPAT_HWCAP_ISA_D)) {
		pr_info("This kernel does not support systems with F but not D\n");
		elf_hwcap &= ~COMPAT_HWCAP_ISA_F;
	}

	if (elf_hwcap & COMPAT_HWCAP_ISA_V) {
		riscv_v_setup_vsize();
		/*
		 * ISA string in device tree might have 'v' flag, but
		 * CONFIG_RISCV_ISA_V is disabled in kernel.
		 * Clear V flag in elf_hwcap if CONFIG_RISCV_ISA_V is disabled.
		 */
		if (!IS_ENABLED(CONFIG_RISCV_ISA_V))
			elf_hwcap &= ~COMPAT_HWCAP_ISA_V;
	}

	memset(print_str, 0, sizeof(print_str));
	for (i = 0, j = 0; i < NUM_ALPHA_EXTS; i++)
		if (riscv_isa[0] & BIT_MASK(i))
			print_str[j++] = (char)('a' + i);
	pr_info("riscv: base ISA extensions %s\n", print_str);

	memset(print_str, 0, sizeof(print_str));
	for (i = 0, j = 0; i < NUM_ALPHA_EXTS; i++)
		if (elf_hwcap & BIT_MASK(i))
			print_str[j++] = (char)('a' + i);
	pr_info("riscv: ELF capabilities %s\n", print_str);
}

unsigned long riscv_get_elf_hwcap(void)
{
	unsigned long hwcap;

	hwcap = (elf_hwcap & ((1UL << RISCV_ISA_EXT_BASE) - 1));

	if (!riscv_v_vstate_ctrl_user_allowed())
		hwcap &= ~COMPAT_HWCAP_ISA_V;

	return hwcap;
}

void check_unaligned_access(int cpu)
{
	u64 start_cycles, end_cycles;
	u64 word_cycles;
	u64 byte_cycles;
	int ratio;
	unsigned long start_jiffies, now;
	struct page *page;
	void *dst;
	void *src;
	long speed = RISCV_HWPROBE_MISALIGNED_SLOW;

	/* We are already set since the last check */
	if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
		return;

	page = alloc_pages(GFP_NOWAIT, get_order(MISALIGNED_BUFFER_SIZE));
	if (!page) {
		pr_warn("Can't alloc pages to measure memcpy performance");
		return;
	}

	/* Make an unaligned destination buffer. */
	dst = (void *)((unsigned long)page_address(page) | 0x1);
	/* Unalign src as well, but differently (off by 1 + 2 = 3). */
	src = dst + (MISALIGNED_BUFFER_SIZE / 2);
	src += 2;
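	/*
	 * Illustrative arithmetic (not part of the original file): with
	 * MISALIGNED_BUFFER_SIZE = 0x4000 and MISALIGNED_COPY_SIZE = 0x1f80,
	 * dst starts at offset 1 and ends at 0x1f81, while src starts at
	 * offset 0x2003 and ends at 0x3f83, so both copies stay inside the
	 * allocated buffer while remaining misaligned.
	 */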
	word_cycles = -1ULL;
	/* Do a warmup. */
	__riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
	preempt_disable();
	start_jiffies = jiffies;
	while ((now = jiffies) == start_jiffies)
		cpu_relax();

	/*
	 * For a fixed amount of time, repeatedly try the function, and take
	 * the best time in cycles as the measurement.
	 */
	while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
		start_cycles = get_cycles64();
		/* Ensure the CSR read can't reorder WRT to the copy. */
		mb();
		__riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
		/* Ensure the copy ends before the end time is snapped. */
		mb();
		end_cycles = get_cycles64();
		if ((end_cycles - start_cycles) < word_cycles)
			word_cycles = end_cycles - start_cycles;
	}

	byte_cycles = -1ULL;
	__riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
	start_jiffies = jiffies;
	while ((now = jiffies) == start_jiffies)
		cpu_relax();

	while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
		start_cycles = get_cycles64();
		mb();
		__riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
		mb();
		end_cycles = get_cycles64();
		if ((end_cycles - start_cycles) < byte_cycles)
			byte_cycles = end_cycles - start_cycles;
	}

	preempt_enable();

	/* Don't divide by zero. */
	if (!word_cycles || !byte_cycles) {
		pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned access speed\n",
			cpu);

		goto out;
	}

	if (word_cycles < byte_cycles)
		speed = RISCV_HWPROBE_MISALIGNED_FAST;

	ratio = div_u64((byte_cycles * 100), word_cycles);
	pr_info("cpu%d: Ratio of byte access time to unaligned word access is %d.%02d, unaligned accesses are %s\n",
		cpu,
		ratio / 100,
		ratio % 100,
		(speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");

	per_cpu(misaligned_access_speed, cpu) = speed;

out:
	__free_pages(page, get_order(MISALIGNED_BUFFER_SIZE));
}
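
/*
 * Worked example (not part of the original file): if the best byte-copy pass
 * takes 4200 cycles and the best word-copy pass takes 1000 cycles, then
 * word_cycles < byte_cycles marks the CPU RISCV_HWPROBE_MISALIGNED_FAST and
 * the ratio prints as 4.20; a ratio of 1.00 or below leaves it marked
 * RISCV_HWPROBE_MISALIGNED_SLOW.
 */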

static int check_unaligned_access_boot_cpu(void)
{
	check_unaligned_access(0);
	return 0;
}

arch_initcall(check_unaligned_access_boot_cpu);

#ifdef CONFIG_RISCV_ALTERNATIVE
/*
 * Alternative patch sites consider 48 bits when determining when to patch
 * the old instruction sequence with the new. These bits are broken into a
 * 16-bit vendor ID and a 32-bit patch ID. A non-zero vendor ID means the
 * patch site is for an erratum, identified by the 32-bit patch ID. When
 * the vendor ID is zero, the patch site is for a cpufeature. cpufeatures
 * further break down patch ID into two 16-bit numbers. The lower 16 bits
 * are the cpufeature ID and the upper 16 bits are used for a value specific
 * to the cpufeature and patch site. If the upper 16 bits are zero, then it
 * implies no specific value is specified. cpufeatures that want to control
 * patching on a per-site basis will provide non-zero values and implement
 * checks here. The checks return true when patching should be done, and
 * false otherwise.
 */
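/*
 * Illustrative encoding (not part of the original file), following the comment
 * above: a cpufeature patch site with patch_id = (6 << 16) | RISCV_ISA_EXT_ZICBOZ
 * has PATCH_ID_CPUFEATURE_ID() == RISCV_ISA_EXT_ZICBOZ and
 * PATCH_ID_CPUFEATURE_VALUE() == 6, i.e. the alternative may only be applied
 * when riscv_cboz_block_size <= (1 << 6) = 64 bytes.
 */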
static bool riscv_cpufeature_patch_check(u16 id, u16 value)
{
	if (!value)
		return true;

	switch (id) {
	case RISCV_ISA_EXT_ZICBOZ:
		/*
		 * Zicboz alternative applications provide the maximum
		 * supported block size order, or zero when it doesn't
		 * matter. If the current block size exceeds the maximum,
		 * then the alternative cannot be applied.
		 */
		return riscv_cboz_block_size <= (1U << value);
	}

	return false;
}

void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin,
						  struct alt_entry *end,
						  unsigned int stage)
{
	struct alt_entry *alt;
	void *oldptr, *altptr;
	u16 id, value;

	if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
		return;

	for (alt = begin; alt < end; alt++) {
		if (alt->vendor_id != 0)
			continue;

		id = PATCH_ID_CPUFEATURE_ID(alt->patch_id);

		if (id >= RISCV_ISA_EXT_MAX) {
			WARN(1, "This extension id:%d is not in ISA extension list", id);
			continue;
		}

		if (!__riscv_isa_extension_available(NULL, id))
			continue;

		value = PATCH_ID_CPUFEATURE_VALUE(alt->patch_id);
		if (!riscv_cpufeature_patch_check(id, value))
			continue;

		oldptr = ALT_OLD_PTR(alt);
		altptr = ALT_ALT_PTR(alt);

		mutex_lock(&text_mutex);
		patch_text_nosync(oldptr, altptr, alt->alt_len);
		riscv_alternative_fix_offsets(oldptr, alt->alt_len, oldptr - altptr);
		mutex_unlock(&text_mutex);
	}
}
#endif