// SPDX-License-Identifier: GPL-2.0-only
/*
 * SBI initialization and all extension implementation.
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 */

#include <linux/init.h>
#include <linux/pm.h>
#include <linux/reboot.h>
#include <asm/sbi.h>
#include <asm/smp.h>

/* default SBI version is 0.1 */
unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT;
EXPORT_SYMBOL(sbi_spec_version);

static void (*__sbi_set_timer)(uint64_t stime) __ro_after_init;
static int (*__sbi_send_ipi)(const struct cpumask *cpu_mask) __ro_after_init;
static int (*__sbi_rfence)(int fid, const struct cpumask *cpu_mask,
			   unsigned long start, unsigned long size,
			   unsigned long arg4, unsigned long arg5) __ro_after_init;

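/**
 * sbi_ecall() - Make a call into the SBI implementation.
 * @ext: The SBI extension ID, passed in register a7.
 * @fid: The function ID within the extension, passed in register a6.
 * @arg0: First call argument, passed in register a0.
 * @arg1: Second call argument, passed in register a1.
 * @arg2: Third call argument, passed in register a2.
 * @arg3: Fourth call argument, passed in register a3.
 * @arg4: Fifth call argument, passed in register a4.
 * @arg5: Sixth call argument, passed in register a5.
 *
 * Return: A struct sbiret carrying the error code returned in a0 and the
 *         value returned in a1 by the SBI implementation.
 */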
struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
			unsigned long arg1, unsigned long arg2,
			unsigned long arg3, unsigned long arg4,
			unsigned long arg5)
{
	struct sbiret ret;

	register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0);
	register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1);
	register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2);
	register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3);
	register uintptr_t a4 asm ("a4") = (uintptr_t)(arg4);
	register uintptr_t a5 asm ("a5") = (uintptr_t)(arg5);
	register uintptr_t a6 asm ("a6") = (uintptr_t)(fid);
	register uintptr_t a7 asm ("a7") = (uintptr_t)(ext);
	asm volatile ("ecall"
		      : "+r" (a0), "+r" (a1)
		      : "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7)
		      : "memory");
	ret.error = a0;
	ret.value = a1;

	return ret;
}
EXPORT_SYMBOL(sbi_ecall);

int sbi_err_map_linux_errno(int err)
{
	switch (err) {
	case SBI_SUCCESS:
		return 0;
	case SBI_ERR_DENIED:
		return -EPERM;
	case SBI_ERR_INVALID_PARAM:
		return -EINVAL;
	case SBI_ERR_INVALID_ADDRESS:
		return -EFAULT;
	case SBI_ERR_NOT_SUPPORTED:
	case SBI_ERR_FAILURE:
	default:
		return -ENOTSUPP;
	}
}
EXPORT_SYMBOL(sbi_err_map_linux_errno);

#ifdef CONFIG_RISCV_SBI_V01
static unsigned long __sbi_v01_cpumask_to_hartmask(const struct cpumask *cpu_mask)
{
	unsigned long cpuid, hartid;
	unsigned long hmask = 0;

	/*
	 * There is no maximum hartid concept in RISC-V and NR_CPUS must not be
	 * associated with hartid. As SBI v0.1 is only kept for backward compatibility
	 * and will be removed in the future, there is no point in supporting hartid
	 * greater than BITS_PER_LONG (32 for RV32 and 64 for RV64). Ideally, SBI v0.2
	 * should be used for platforms with hartid greater than BITS_PER_LONG.
	 */
	for_each_cpu(cpuid, cpu_mask) {
		hartid = cpuid_to_hartid_map(cpuid);
		if (hartid >= BITS_PER_LONG) {
			pr_warn("Unable to send any request to hartid >= BITS_PER_LONG for SBI v0.1\n");
			break;
		}
		hmask |= 1UL << hartid;
	}

	return hmask;
}

/**
 * sbi_console_putchar() - Writes given character to the console device.
 * @ch: The data to be written to the console.
 *
 * Return: None
 */
void sbi_console_putchar(int ch)
{
	sbi_ecall(SBI_EXT_0_1_CONSOLE_PUTCHAR, 0, ch, 0, 0, 0, 0, 0);
}
EXPORT_SYMBOL(sbi_console_putchar);

/**
 * sbi_console_getchar() - Reads a byte from console device.
 *
 * Return: The value read from console.
 */
int sbi_console_getchar(void)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_0_1_CONSOLE_GETCHAR, 0, 0, 0, 0, 0, 0, 0);

	return ret.error;
}
EXPORT_SYMBOL(sbi_console_getchar);

/**
 * sbi_shutdown() - Remove all the harts from executing supervisor code.
 *
 * Return: None
 */
void sbi_shutdown(void)
{
	sbi_ecall(SBI_EXT_0_1_SHUTDOWN, 0, 0, 0, 0, 0, 0, 0);
}
EXPORT_SYMBOL(sbi_shutdown);

/**
 * sbi_clear_ipi() - Clear any pending IPIs for the calling hart.
 *
 * Return: None
 */
void sbi_clear_ipi(void)
{
	sbi_ecall(SBI_EXT_0_1_CLEAR_IPI, 0, 0, 0, 0, 0, 0, 0);
}
EXPORT_SYMBOL(sbi_clear_ipi);

/**
 * __sbi_set_timer_v01() - Program the timer for next timer event.
 * @stime_value: The value after which next timer event should fire.
 *
 * Return: None
 */
static void __sbi_set_timer_v01(uint64_t stime_value)
{
#if __riscv_xlen == 32
	sbi_ecall(SBI_EXT_0_1_SET_TIMER, 0, stime_value,
		  stime_value >> 32, 0, 0, 0, 0);
#else
	sbi_ecall(SBI_EXT_0_1_SET_TIMER, 0, stime_value, 0, 0, 0, 0, 0);
#endif
}

static int __sbi_send_ipi_v01(const struct cpumask *cpu_mask)
{
	unsigned long hart_mask;

	if (!cpu_mask)
		cpu_mask = cpu_online_mask;
	hart_mask = __sbi_v01_cpumask_to_hartmask(cpu_mask);

	sbi_ecall(SBI_EXT_0_1_SEND_IPI, 0, (unsigned long)(&hart_mask),
		  0, 0, 0, 0, 0);
	return 0;
}

static int __sbi_rfence_v01(int fid, const struct cpumask *cpu_mask,
			    unsigned long start, unsigned long size,
			    unsigned long arg4, unsigned long arg5)
{
	int result = 0;
	unsigned long hart_mask;

	if (!cpu_mask)
		cpu_mask = cpu_online_mask;
	hart_mask = __sbi_v01_cpumask_to_hartmask(cpu_mask);

	/* v0.2 function IDs are equivalent to v0.1 extension IDs */
	switch (fid) {
	case SBI_EXT_RFENCE_REMOTE_FENCE_I:
		sbi_ecall(SBI_EXT_0_1_REMOTE_FENCE_I, 0,
			  (unsigned long)&hart_mask, 0, 0, 0, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
		sbi_ecall(SBI_EXT_0_1_REMOTE_SFENCE_VMA, 0,
			  (unsigned long)&hart_mask, start, size,
			  0, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
		sbi_ecall(SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID, 0,
			  (unsigned long)&hart_mask, start, size,
			  arg4, 0, 0);
		break;
	default:
		pr_err("SBI call [%d] not supported in SBI v0.1\n", fid);
		result = -EINVAL;
	}

	return result;
}

static void sbi_set_power_off(void)
{
	pm_power_off = sbi_shutdown;
}
#else
static void __sbi_set_timer_v01(uint64_t stime_value)
{
	pr_warn("Timer extension is not available in SBI v%lu.%lu\n",
		sbi_major_version(), sbi_minor_version());
}

static int __sbi_send_ipi_v01(const struct cpumask *cpu_mask)
{
	pr_warn("IPI extension is not available in SBI v%lu.%lu\n",
		sbi_major_version(), sbi_minor_version());

	return 0;
}

static int __sbi_rfence_v01(int fid, const struct cpumask *cpu_mask,
			    unsigned long start, unsigned long size,
			    unsigned long arg4, unsigned long arg5)
{
	pr_warn("remote fence extension is not available in SBI v%lu.%lu\n",
		sbi_major_version(), sbi_minor_version());

	return 0;
}

static void sbi_set_power_off(void) {}
#endif /* CONFIG_RISCV_SBI_V01 */

static void __sbi_set_timer_v02(uint64_t stime_value)
{
#if __riscv_xlen == 32
	sbi_ecall(SBI_EXT_TIME, SBI_EXT_TIME_SET_TIMER, stime_value,
		  stime_value >> 32, 0, 0, 0, 0);
#else
	sbi_ecall(SBI_EXT_TIME, SBI_EXT_TIME_SET_TIMER, stime_value, 0,
		  0, 0, 0, 0);
#endif
}

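/*
 * Send an IPI via the SBI v0.2 IPI extension. Hart IDs need not match Linux
 * CPU numbers or be contiguous, so the targets are batched into windows of
 * BITS_PER_LONG harts: hbase is the hart ID that opened the current window
 * and hmask has one bit set per target hart relative to hbase. The pending
 * mask is flushed with an ecall whenever a hart ID falls at or beyond
 * hbase + BITS_PER_LONG, and once more after the loop.
 */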
static int __sbi_send_ipi_v02(const struct cpumask *cpu_mask)
{
	unsigned long hartid, cpuid, hmask = 0, hbase = 0;
	struct sbiret ret = {0};
	int result;

	if (!cpu_mask)
		cpu_mask = cpu_online_mask;

	for_each_cpu(cpuid, cpu_mask) {
		hartid = cpuid_to_hartid_map(cpuid);
		if (hmask && ((hbase + BITS_PER_LONG) <= hartid)) {
			ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI,
					hmask, hbase, 0, 0, 0, 0);
			if (ret.error)
				goto ecall_failed;
			hmask = 0;
			hbase = 0;
		}
		if (!hmask)
			hbase = hartid;
		hmask |= 1UL << (hartid - hbase);
	}

	if (hmask) {
		ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI,
				hmask, hbase, 0, 0, 0, 0);
		if (ret.error)
			goto ecall_failed;
	}

	return 0;

ecall_failed:
	result = sbi_err_map_linux_errno(ret.error);
	pr_err("%s: hbase = [%lu] hmask = [0x%lx] failed (error [%d])\n",
	       __func__, hbase, hmask, result);
	return result;
}

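/*
 * Issue one SBI v0.2 RFENCE ecall for a single window of harts selected by
 * hmask/hbase. For the SFENCE/HFENCE variants, start and size describe the
 * address range to flush; arg4 carries the ASID or VMID for the *_ASID and
 * *_VMID function IDs. Unknown function IDs are rejected with -EINVAL.
 */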
static int __sbi_rfence_v02_call(unsigned long fid, unsigned long hmask,
				 unsigned long hbase, unsigned long start,
				 unsigned long size, unsigned long arg4,
				 unsigned long arg5)
{
	struct sbiret ret = {0};
	int ext = SBI_EXT_RFENCE;
	int result = 0;

	switch (fid) {
	case SBI_EXT_RFENCE_REMOTE_FENCE_I:
		ret = sbi_ecall(ext, fid, hmask, hbase, 0, 0, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
		ret = sbi_ecall(ext, fid, hmask, hbase, start,
				size, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
		ret = sbi_ecall(ext, fid, hmask, hbase, start,
				size, arg4, 0);
		break;

	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
		ret = sbi_ecall(ext, fid, hmask, hbase, start,
				size, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
		ret = sbi_ecall(ext, fid, hmask, hbase, start,
				size, arg4, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA:
		ret = sbi_ecall(ext, fid, hmask, hbase, start,
				size, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
		ret = sbi_ecall(ext, fid, hmask, hbase, start,
				size, arg4, 0);
		break;
	default:
		pr_err("unknown function ID [%lu] for SBI extension [%d]\n",
		       fid, ext);
		result = -EINVAL;
	}

	if (ret.error) {
		result = sbi_err_map_linux_errno(ret.error);
		pr_err("%s: hbase = [%lu] hmask = [0x%lx] failed (error [%d])\n",
		       __func__, hbase, hmask, result);
	}

	return result;
}

static int __sbi_rfence_v02(int fid, const struct cpumask *cpu_mask,
			    unsigned long start, unsigned long size,
			    unsigned long arg4, unsigned long arg5)
{
	unsigned long hartid, cpuid, hmask = 0, hbase = 0;
	int result;

	if (!cpu_mask)
		cpu_mask = cpu_online_mask;

	for_each_cpu(cpuid, cpu_mask) {
		hartid = cpuid_to_hartid_map(cpuid);
		if (hmask && ((hbase + BITS_PER_LONG) <= hartid)) {
			result = __sbi_rfence_v02_call(fid, hmask, hbase,
						       start, size, arg4, arg5);
			if (result)
				return result;
			hmask = 0;
			hbase = 0;
		}
		if (!hmask)
			hbase = hartid;
		hmask |= 1UL << (hartid - hbase);
	}

	if (hmask) {
		result = __sbi_rfence_v02_call(fid, hmask, hbase,
					       start, size, arg4, arg5);
		if (result)
			return result;
	}

	return 0;
}

/**
 * sbi_set_timer() - Program the timer for next timer event.
 * @stime_value: The value after which next timer event should fire.
 *
 * Return: None.
 */
void sbi_set_timer(uint64_t stime_value)
{
	__sbi_set_timer(stime_value);
}

/**
 * sbi_send_ipi() - Send an IPI to any hart.
 * @cpu_mask: A cpu mask containing all the target harts.
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_send_ipi(const struct cpumask *cpu_mask)
{
	return __sbi_send_ipi(cpu_mask);
}
EXPORT_SYMBOL(sbi_send_ipi);

/**
 * sbi_remote_fence_i() - Execute FENCE.I instruction on given remote harts.
 * @cpu_mask: A cpu mask containing all the target harts.
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_fence_i(const struct cpumask *cpu_mask)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_FENCE_I,
			    cpu_mask, 0, 0, 0, 0);
}
EXPORT_SYMBOL(sbi_remote_fence_i);

/**
 * sbi_remote_sfence_vma() - Execute SFENCE.VMA instructions on given remote
 *			     harts for the specified virtual address range.
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the virtual address
 * @size: Total size of the virtual address range.
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_sfence_vma(const struct cpumask *cpu_mask,
			  unsigned long start,
			  unsigned long size)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
			    cpu_mask, start, size, 0, 0);
}
EXPORT_SYMBOL(sbi_remote_sfence_vma);

/**
 * sbi_remote_sfence_vma_asid() - Execute SFENCE.VMA instructions on given
 * remote harts for a virtual address range belonging to a specific ASID.
 *
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the virtual address
 * @size: Total size of the virtual address range.
 * @asid: The value of address space identifier (ASID).
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
			       unsigned long start,
			       unsigned long size,
			       unsigned long asid)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
			    cpu_mask, start, size, asid, 0);
}
EXPORT_SYMBOL(sbi_remote_sfence_vma_asid);

/**
 * sbi_remote_hfence_gvma() - Execute HFENCE.GVMA instructions on given remote
 *			      harts for the specified guest physical address range.
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the guest physical address
 * @size: Total size of the guest physical address range.
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_hfence_gvma(const struct cpumask *cpu_mask,
			   unsigned long start,
			   unsigned long size)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
			    cpu_mask, start, size, 0, 0);
}
EXPORT_SYMBOL_GPL(sbi_remote_hfence_gvma);

/**
 * sbi_remote_hfence_gvma_vmid() - Execute HFENCE.GVMA instructions on given
 * remote harts for a guest physical address range belonging to a specific VMID.
 *
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the guest physical address
 * @size: Total size of the guest physical address range.
 * @vmid: The value of guest ID (VMID).
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_hfence_gvma_vmid(const struct cpumask *cpu_mask,
				unsigned long start,
				unsigned long size,
				unsigned long vmid)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
			    cpu_mask, start, size, vmid, 0);
}
EXPORT_SYMBOL(sbi_remote_hfence_gvma_vmid);

/**
 * sbi_remote_hfence_vvma() - Execute HFENCE.VVMA instructions on given remote
 *			      harts for the current guest virtual address range.
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the current guest virtual address
 * @size: Total size of the current guest virtual address range.
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_hfence_vvma(const struct cpumask *cpu_mask,
			   unsigned long start,
			   unsigned long size)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
			    cpu_mask, start, size, 0, 0);
}
EXPORT_SYMBOL(sbi_remote_hfence_vvma);

/**
 * sbi_remote_hfence_vvma_asid() - Execute HFENCE.VVMA instructions on given
 * remote harts for current guest virtual address range belonging to a specific
 * ASID.
 *
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the current guest virtual address
 * @size: Total size of the current guest virtual address range.
 * @asid: The value of address space identifier (ASID).
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask,
				unsigned long start,
				unsigned long size,
				unsigned long asid)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,
			    cpu_mask, start, size, asid, 0);
}
EXPORT_SYMBOL(sbi_remote_hfence_vvma_asid);

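/*
 * Reset the system through the SBI SRST extension. Per the SBI spec, a
 * successful system reset request does not return to the kernel, so the
 * pr_warn() below is only reached when the ecall fails.
 */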
static void sbi_srst_reset(unsigned long type, unsigned long reason)
{
	sbi_ecall(SBI_EXT_SRST, SBI_EXT_SRST_RESET, type, reason,
		  0, 0, 0, 0);
	pr_warn("%s: type=0x%lx reason=0x%lx failed\n",
		__func__, type, reason);
}

static int sbi_srst_reboot(struct notifier_block *this,
			   unsigned long mode, void *cmd)
{
	sbi_srst_reset((mode == REBOOT_WARM || mode == REBOOT_SOFT) ?
		       SBI_SRST_RESET_TYPE_WARM_REBOOT :
		       SBI_SRST_RESET_TYPE_COLD_REBOOT,
		       SBI_SRST_RESET_REASON_NONE);
	return NOTIFY_DONE;
}

static struct notifier_block sbi_srst_reboot_nb;

static void sbi_srst_power_off(void)
{
	sbi_srst_reset(SBI_SRST_RESET_TYPE_SHUTDOWN,
		       SBI_SRST_RESET_REASON_NONE);
}

/**
 * sbi_probe_extension() - Check if an SBI extension ID is supported or not.
 * @extid: The extension ID to be probed.
 *
 * Return: Extension specific nonzero value if yes, -ENOTSUPP otherwise.
 */
int sbi_probe_extension(int extid)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_PROBE_EXT, extid,
			0, 0, 0, 0, 0);
	if (!ret.error && ret.value)
		return ret.value;

	return -ENOTSUPP;
}
EXPORT_SYMBOL(sbi_probe_extension);

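/*
 * Helper for SBI Base extension functions that take no arguments: returns the
 * queried value on success or a mapped Linux error code on failure.
 */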
static long __sbi_base_ecall(int fid)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_BASE, fid, 0, 0, 0, 0, 0, 0);
	if (!ret.error)
		return ret.value;
	else
		return sbi_err_map_linux_errno(ret.error);
}

static inline long sbi_get_spec_version(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_SPEC_VERSION);
}

static inline long sbi_get_firmware_id(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_ID);
}

static inline long sbi_get_firmware_version(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION);
}

long sbi_get_mvendorid(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_MVENDORID);
}

long sbi_get_marchid(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_MARCHID);
}

long sbi_get_mimpid(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_MIMPID);
}

static void sbi_send_cpumask_ipi(const struct cpumask *target)
{
	sbi_send_ipi(target);
}

static const struct riscv_ipi_ops sbi_ipi_ops = {
	.ipi_inject = sbi_send_cpumask_ipi
};

void __init sbi_init(void)
{
	int ret;

	sbi_set_power_off();
	ret = sbi_get_spec_version();
	if (ret > 0)
		sbi_spec_version = ret;

	pr_info("SBI specification v%lu.%lu detected\n",
		sbi_major_version(), sbi_minor_version());

	if (!sbi_spec_is_0_1()) {
		pr_info("SBI implementation ID=0x%lx Version=0x%lx\n",
			sbi_get_firmware_id(), sbi_get_firmware_version());
		if (sbi_probe_extension(SBI_EXT_TIME) > 0) {
			__sbi_set_timer = __sbi_set_timer_v02;
			pr_info("SBI TIME extension detected\n");
		} else {
			__sbi_set_timer = __sbi_set_timer_v01;
		}
		if (sbi_probe_extension(SBI_EXT_IPI) > 0) {
			__sbi_send_ipi = __sbi_send_ipi_v02;
			pr_info("SBI IPI extension detected\n");
		} else {
			__sbi_send_ipi = __sbi_send_ipi_v01;
		}
		if (sbi_probe_extension(SBI_EXT_RFENCE) > 0) {
			__sbi_rfence = __sbi_rfence_v02;
			pr_info("SBI RFENCE extension detected\n");
		} else {
			__sbi_rfence = __sbi_rfence_v01;
		}
		if ((sbi_spec_version >= sbi_mk_version(0, 3)) &&
		    (sbi_probe_extension(SBI_EXT_SRST) > 0)) {
			pr_info("SBI SRST extension detected\n");
			pm_power_off = sbi_srst_power_off;
			sbi_srst_reboot_nb.notifier_call = sbi_srst_reboot;
			sbi_srst_reboot_nb.priority = 192;
			register_restart_handler(&sbi_srst_reboot_nb);
		}
	} else {
		__sbi_set_timer = __sbi_set_timer_v01;
		__sbi_send_ipi = __sbi_send_ipi_v01;
		__sbi_rfence = __sbi_rfence_v01;
	}

	riscv_set_ipi_ops(&sbi_ipi_ops);
}