Lines matching +full:multi +full:- +full:cluster in arch/arm/mach-sunxi/mc_smp.c

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2018 Chen-Yu Tsai
5 * Chen-Yu Tsai <wens@csie.org>
7 * arch/arm/mach-sunxi/mc_smp.c
9 * Based on Allwinner code, arch/arm/mach-exynos/mcpm-exynos.c, and
10 * arch/arm/mach-hisi/platmcpm.c
11 * Cluster cache enable trampoline code adapted from MCPM framework
14 #include <linux/arm-cci.h>
19 #include <linux/irqchip/arm-gic.h>
70 /* R_CPUCFG registers, specific to sun8i-a83t */
in sunxi_core_is_cortex_a15():
87   static bool sunxi_core_is_cortex_a15(unsigned int core, unsigned int cluster)
90   int cpu = cluster * SUNXI_CPUS_PER_CLUSTER + core;
102  * would be mid way in a core or cluster power sequence.
104  pr_err("%s: Couldn't get CPU cluster %u core %u device node\n",
105  __func__, cluster, core);
110  is_compatible = of_device_is_compatible(node, "arm,cortex-a15");
in sunxi_cpu_power_switch_set():
115  static int sunxi_cpu_power_switch_set(unsigned int cpu, unsigned int cluster,
121  reg = readl(prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
124  pr_debug("power clamp for cluster %u cpu %u already open\n",
125  cluster, cpu);
129  writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
131  writel(0xfe, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
133  writel(0xf8, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
135  writel(0xf0, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
137  writel(0x00, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
140  writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
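The switch-set fragment above opens the per-core power clamp gradually, writing 0xff, 0xfe, 0xf8, 0xf0 and finally 0x00 to the PRCM switch register rather than flipping it in one step. A minimal sketch of that ramp, assuming an already-mapped register pointer and a 10us settle delay between writes (the delay and the "0x00 means fully open" reading are assumptions, not taken from the listing):

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Illustrative sketch only: gradually release a CPU power clamp.
 * switch_reg stands in for prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu). */
static void example_power_clamp_open(void __iomem *switch_reg)
{
        static const u8 ramp[] = { 0xff, 0xfe, 0xf8, 0xf0, 0x00 };
        int i;

        /* Assumed: 0x00 (the final ramp value) means the clamp is already open. */
        if (readl(switch_reg) == 0x00)
                return;

        for (i = 0; i < ARRAY_SIZE(ramp); i++) {
                writel(ramp[i], switch_reg);
                udelay(10);     /* assumed settle time between steps */
        }
}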
in sunxi_cpu_powerup():
158  static int sunxi_cpu_powerup(unsigned int cpu, unsigned int cluster)
162  pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
163  if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS)
164  return -EINVAL;
167  if (cluster == 0 && cpu == 0)
170  /* assert processor power-on reset */
171  reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
173  writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
176  /* assert cpu power-on reset */
178  R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
181  R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
185  /* Cortex-A7: hold L1 reset disable signal low */
186  if (!sunxi_core_is_cortex_a15(cpu, cluster)) {
187  reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));
189  writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));
193  reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
198  * to ARM manuals, asserting power-on reset is sufficient.
200  if (!sunxi_core_is_cortex_a15(cpu, cluster))
203  writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
206  sunxi_cpu_power_switch_set(cpu, cluster, true);
215  reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
217  writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
226  /* de-assert processor power-on reset */
227  reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
229  writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
233  R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
236  R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
240  /* de-assert all processor resets */
241  reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
244  if (!sunxi_core_is_cortex_a15(cpu, cluster))
248  writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
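Most of the power-up fragments above are read-modify-write sequences on reset-control registers: read the register, flip the core's bit, write it back. A generic sketch of that pattern; the register pointer, bit mask, and the active-low convention (clearing a bit asserts reset) are assumptions for illustration:

#include <linux/io.h>
#include <linux/types.h>

/* Sketch: assert or de-assert one core's reset line via a shared
 * reset-control register (hypothetical pointer and bit mask). */
static void example_set_core_reset(void __iomem *rst_reg, u32 core_bit,
                                   bool assert)
{
        u32 reg = readl(rst_reg);

        if (assert)
                reg &= ~core_bit;       /* assumed active-low: clear to assert */
        else
                reg |= core_bit;

        writel(reg, rst_reg);
}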
in sunxi_cluster_powerup():
253  static int sunxi_cluster_powerup(unsigned int cluster)
257  pr_debug("%s: cluster %u\n", __func__, cluster);
258  if (cluster >= SUNXI_NR_CLUSTERS)
259  return -EINVAL;
261  /* For A83T, assert cluster cores resets */
263  reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
265  writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
270  reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
272  writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
274  /* assert cluster processor power-on resets */
275  reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
277  writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
279  /* assert cluster cores resets */
282  R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
285  R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
289  /* assert cluster resets */
290  reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
298  * to ARM manuals, asserting power-on reset is sufficient.
300  if (!sunxi_core_is_cortex_a15(0, cluster))
303  writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
306  reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));
307  if (sunxi_core_is_cortex_a15(0, cluster)) {
308  /* Cortex-A15: hold L2RSTDISABLE low */
311  /* Cortex-A7: hold L1RSTDISABLE and L2RSTDISABLE low */
315  writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));
317  /* clear cluster power gate */
318  reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
323  writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
326  /* de-assert cluster resets */
327  reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
331  writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
333  /* de-assert ACINACTM */
334  reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
336  writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
343 * enable CCI-400 and proper cluster cache disable before power down.
in sunxi_cluster_cache_disable_without_axi():
349  * On the Cortex-A15 we need to disable
359  /* Flush all cache levels for this cluster. */
363  * Disable cluster-level coherency by masking
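The fragments above belong to the cache-teardown path run before a cluster loses power. A sketch of the usual MCPM-style ordering, assuming the standard ARM helpers named below and omitting the Cortex-A15 L2 prefetch-disable step mentioned in the listing:

#include <linux/arm-cci.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>

/* Sketch: flush this cluster's caches first, then drop out of CCI
 * coherency so no further snoops or DVM messages reach the cluster. */
static void example_cluster_cache_teardown(void)
{
        /* Flush all cache levels for this cluster and exit coherency. */
        v7_exit_coherency_flush(all);

        /* Mask incoming snoops/DVM messages on this CPU's CCI slave port. */
        cci_disable_port_by_cpu(read_cpuid_mpidr());
}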
in sunxi_mc_smp_cluster_is_down():
374  static bool sunxi_mc_smp_cluster_is_down(unsigned int cluster)
379  if (sunxi_mc_smp_cpu_table[cluster][i])
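A cluster counts as down when no core in it holds a non-zero entry in the bookkeeping table referenced above. A minimal sketch of that check, reusing the table and bound names that appear in the listing:

#include <linux/types.h>

/* Sketch: sunxi_mc_smp_cpu_table and SUNXI_CPUS_PER_CLUSTER are the
 * names seen in the listing above, not definitions provided here. */
static bool example_cluster_is_down(unsigned int cluster)
{
        unsigned int i;

        for (i = 0; i < SUNXI_CPUS_PER_CLUSTER; i++)
                if (sunxi_mc_smp_cpu_table[cluster][i])
                        return false;   /* at least one core still in use */

        return true;
}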
in sunxi_mc_smp_boot_secondary():
393  unsigned int mpidr, cpu, cluster;
397  cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
400  return -ENODEV;
401  if (cluster >= SUNXI_NR_CLUSTERS || cpu >= SUNXI_CPUS_PER_CLUSTER)
402  return -EINVAL;
406  if (sunxi_mc_smp_cpu_table[cluster][cpu])
409  if (sunxi_mc_smp_cluster_is_down(cluster)) {
411  sunxi_cluster_powerup(cluster);
418  sunxi_cpu_powerup(cpu, cluster);
421  sunxi_mc_smp_cpu_table[cluster][cpu]++;
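Before touching any power registers, boot_secondary translates the logical CPU number into the physical (cluster, core) pair encoded in the MPIDR, as the fragments above show. A small sketch of that translation using the standard ARM helpers; the wrapper name is hypothetical:

#include <linux/types.h>
#include <asm/cputype.h>
#include <asm/smp_plat.h>

/* Hypothetical helper: map a logical CPU number to the (cluster, core)
 * pair used to index the PRCM/CPUCFG registers. */
static void example_cpu_to_cluster_core(unsigned int l_cpu,
                                        unsigned int *cluster,
                                        unsigned int *core)
{
        u32 mpidr = cpu_logical_map(l_cpu);

        *core = MPIDR_AFFINITY_LEVEL(mpidr, 0);         /* Aff0: core within cluster */
        *cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);      /* Aff1: cluster number */
}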
in sunxi_cluster_cache_disable():
430  unsigned int cluster = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 1);
433  pr_debug("%s: cluster %u\n", __func__, cluster);
438  reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
440  writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
in sunxi_mc_smp_cpu_die():
445  unsigned int mpidr, cpu, cluster;
450  cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
451  pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
454  sunxi_mc_smp_cpu_table[cluster][cpu]--;
455  if (sunxi_mc_smp_cpu_table[cluster][cpu] == 1) {
461  } else if (sunxi_mc_smp_cpu_table[cluster][cpu] > 1) {
462  pr_err("Cluster %d CPU%d boots multiple times\n",
463  cluster, cpu);
467  last_man = sunxi_mc_smp_cluster_is_down(cluster);
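The cpu_die fragments above decrement the CPU's use count and then ask whether the whole cluster is now idle, which makes this CPU the "last man" responsible for cluster teardown. A condensed sketch of that bookkeeping; the lock is a stand-in, while the table and cluster-is-down helper are the ones named in the listing:

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(example_table_lock);     /* stand-in lock for the sketch */

/* Sketch: drop this CPU's use count and report whether it was the
 * last core alive in its cluster. */
static bool example_mark_cpu_down(unsigned int cluster, unsigned int cpu)
{
        bool last_man;

        spin_lock(&example_table_lock);
        sunxi_mc_smp_cpu_table[cluster][cpu]--;
        last_man = sunxi_mc_smp_cluster_is_down(cluster);
        spin_unlock(&example_table_lock);

        return last_man;
}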
in sunxi_cpu_powerdown():
480  static int sunxi_cpu_powerdown(unsigned int cpu, unsigned int cluster)
485  pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
486  if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS)
487  return -EINVAL;
493  reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
495  writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
499  sunxi_cpu_power_switch_set(cpu, cluster, false);
in sunxi_cluster_powerdown():
504  static int sunxi_cluster_powerdown(unsigned int cluster)
508  pr_debug("%s: cluster %u\n", __func__, cluster);
509  if (cluster >= SUNXI_NR_CLUSTERS)
510  return -EINVAL;
512  /* assert cluster resets or system will hang */
513  pr_debug("%s: assert cluster reset\n", __func__);
514  reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
518  writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
520  /* gate cluster power */
521  pr_debug("%s: gate cluster power\n", __func__);
522  reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
527  writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
in sunxi_mc_smp_cpu_kill():
535  unsigned int mpidr, cpu, cluster;
542  cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
545  if (WARN_ON(cluster >= SUNXI_NR_CLUSTERS ||
564  if (sunxi_mc_smp_cpu_table[cluster][cpu])
567  reg = readl(cpucfg_base + CPUCFG_CX_STATUS(cluster));
578  sunxi_cpu_powerdown(cpu, cluster);
580  if (!sunxi_mc_smp_cluster_is_down(cluster))
583  /* wait for cluster L2 WFI */
584  ret = readl_poll_timeout(cpucfg_base + CPUCFG_CX_STATUS(cluster), reg,
589  * Ignore timeout on the cluster. Leaving the cluster on
598  /* Power down cluster */
599  sunxi_cluster_powerdown(cluster);
603  pr_debug("%s: cluster %u cpu %u powerdown: %d\n",
604  __func__, cluster, cpu, ret);
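The "wait for cluster L2 WFI" step above polls a CPUCFG status register before the last man powers the cluster down, and the listing notes that a timeout is tolerated. A minimal sketch of that polling pattern with readl_poll_timeout(); the status bit, poll interval, and timeout values below are illustrative assumptions, not values from the driver:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/types.h>

#define EXAMPLE_STATUS_STANDBYWFIL2     BIT(0)  /* assumed bit position */

/* Sketch: poll an already-mapped status register until the cluster
 * reports its L2 is in WFI, or give up after 1 ms. */
static int example_wait_for_l2_wfi(void __iomem *status_reg)
{
        u32 reg;

        return readl_poll_timeout(status_reg, reg,
                                  reg & EXAMPLE_STATUS_STANDBYWFIL2,
                                  10, 1000);
}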
in sunxi_mc_smp_cpu_can_disable():
610  /* CPU0 hotplug not handled for sun8i-a83t */
in sunxi_mc_smp_cpu_table_init():
630  unsigned int mpidr, cpu, cluster;
634  cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
636  if (cluster >= SUNXI_NR_CLUSTERS || cpu >= SUNXI_CPUS_PER_CLUSTER) {
640  sunxi_mc_smp_cpu_table[cluster][cpu] = 1;
647 * We need the trampoline code to enable CCI-400 on the first cluster
in sunxi_mc_smp_loopback():
668  * We're going to soft-restart the current CPU through the
669  * low-level MCPM code by leveraging the suspend/resume
699 /* This structure holds SoC-specific bits tied to an enable-method string. */
in sunxi_mc_smp_put_nodes():
708  of_node_put(nodes->prcm_node);
709  of_node_put(nodes->cpucfg_node);
710  of_node_put(nodes->sram_node);
711  of_node_put(nodes->r_cpucfg_node);
in sun9i_a80_get_smp_nodes():
717  nodes->prcm_node = of_find_compatible_node(NULL, NULL,
718  "allwinner,sun9i-a80-prcm");
719  if (!nodes->prcm_node) {
721  return -ENODEV;
724  nodes->cpucfg_node = of_find_compatible_node(NULL, NULL,
725  "allwinner,sun9i-a80-cpucfg");
726  if (!nodes->cpucfg_node) {
728  return -ENODEV;
731  nodes->sram_node = of_find_compatible_node(NULL, NULL,
732  "allwinner,sun9i-a80-smp-sram");
733  if (!nodes->sram_node) {
735  return -ENODEV;
in sun8i_a83t_get_smp_nodes():
743  nodes->prcm_node = of_find_compatible_node(NULL, NULL,
744  "allwinner,sun8i-a83t-r-ccu");
745  if (!nodes->prcm_node) {
747  return -ENODEV;
750  nodes->cpucfg_node = of_find_compatible_node(NULL, NULL,
751  "allwinner,sun8i-a83t-cpucfg");
752  if (!nodes->cpucfg_node) {
754  return -ENODEV;
757  nodes->r_cpucfg_node = of_find_compatible_node(NULL, NULL,
758  "allwinner,sun8i-a83t-r-cpucfg");
759  if (!nodes->r_cpucfg_node) {
761  return -ENODEV;
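Both node-lookup helpers above follow the same pattern: look up each required controller by compatible string and fail with -ENODEV when one is missing; the references are released later by the put helper via of_node_put(). A compact sketch of one lookup step; the helper name is hypothetical and the error message wording is illustrative:

#include <linux/of.h>
#include <linux/printk.h>

/* Sketch: find a required node by compatible string, warn if absent.
 * The caller owns the reference and must of_node_put() it when done. */
static struct device_node *example_find_required_node(const char *compat)
{
        struct device_node *np;

        np = of_find_compatible_node(NULL, NULL, compat);
        if (!np)
                pr_err("%s: missing device node for %s\n", __func__, compat);

        return np;
}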
769 .enable_method = "allwinner,sun9i-a80-smp",
773 .enable_method = "allwinner,sun8i-a83t-smp",
in sunxi_mc_smp_init():
788  * Don't bother checking the "cpus" node, as an enable-method
793  return -ENODEV;
796  * We can't actually use the enable-method magic in the kernel.
804  ret = of_property_match_string(node, "enable-method",
812  return -ENODEV;
817  return -EINVAL;
820  pr_err("%s: CCI-400 not available\n", __func__);
821  return -ENODEV;
836  ret = -ENOMEM;
841  "sunxi-mc-smp");
851  0, "sunxi-mc-smp");
854  pr_err("%s: failed to map R-CPUCFG registers\n",
860  "sunxi-mc-smp");
868  /* Configure CCI-400 for boot cluster */
871  pr_err("%s: failed to configure boot cluster: %d\n",
886  /* Actually enable multi cluster SMP */
889  pr_info("sunxi multi cluster SMP support installed\n");
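sunxi_mc_smp_init picks the SoC variant by matching the boot CPU's "enable-method" property against the supported strings ("allwinner,sun9i-a80-smp", "allwinner,sun8i-a83t-smp") shown above. A condensed sketch of that selection loop, assuming a variants table shaped like the one in the listing:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/of.h>

/* Assumed shape of one variants-table entry, following the listing. */
struct example_smp_variant {
        const char *enable_method;
};

static const struct example_smp_variant example_variants[] = {
        { .enable_method = "allwinner,sun9i-a80-smp" },
        { .enable_method = "allwinner,sun8i-a83t-smp" },
};

/* Sketch: return the index of the variant whose enable-method string is
 * listed in the given CPU device node, or -ENODEV if none matches. */
static int example_match_enable_method(struct device_node *cpu_node)
{
        int i, ret;

        for (i = 0; i < ARRAY_SIZE(example_variants); i++) {
                ret = of_property_match_string(cpu_node, "enable-method",
                                               example_variants[i].enable_method);
                if (ret >= 0)
                        return i;
        }

        return -ENODEV;
}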