// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2013 MundoReader S.L.
 * Author: Heiko Stuebner <heiko@sntech.de>
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>

#include <linux/reset.h>
#include <linux/cpu.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/smp_scu.h>
#include <asm/smp_plat.h>
#include <asm/mach/map.h>

#include "core.h"

static void __iomem *scu_base_addr;
static void __iomem *sram_base_addr;
static int ncores;

#define PMU_PWRDN_CON		0x08
#define PMU_PWRDN_ST		0x0c

#define PMU_PWRDN_SCU		4

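/*
 * Each bit in PMU_PWRDN_CON requests power-down of one power domain
 * (bit n set == domain n powered down) and PMU_PWRDN_ST mirrors the
 * resulting state. PMU_PWRDN_SCU is the bit for the Cortex-A9
 * SCU/cluster domain.
 */
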
static struct regmap *pmu;
static int has_pmu = true;

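/*
 * Returns 1 if power domain @pd is currently powered on, 0 if it is
 * powered down, or a negative errno if the PMU could not be read.
 */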
static int pmu_power_domain_is_on(int pd)
{
	u32 val;
	int ret;

	ret = regmap_read(pmu, PMU_PWRDN_ST, &val);
	if (ret < 0)
		return ret;

	return !(val & BIT(pd));
}

static struct reset_control *rockchip_get_core_reset(int cpu)
{
	struct device *dev = get_cpu_device(cpu);
	struct device_node *np;

	/* The cpu device is only available after the initial core bringup */
	if (dev)
		np = dev->of_node;
	else
		np = of_get_cpu_node(cpu, NULL);

	return of_reset_control_get_exclusive(np, NULL);
}

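/*
 * Power a core's power domain up or down. When powering down, the core is
 * first held in soft reset; the PMU request is then written and PMU_PWRDN_ST
 * is polled until the domain has actually reached the requested state. When
 * powering up, the reset is released again once the domain is on.
 */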
static int pmu_set_power_domain(int pd, bool on)
{
	u32 val = (on) ? 0 : BIT(pd);
	struct reset_control *rstc = rockchip_get_core_reset(pd);
	int ret;

	if (IS_ERR(rstc) && read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
		pr_err("%s: could not get reset control for core %d\n",
		       __func__, pd);
		return PTR_ERR(rstc);
	}

	/*
	 * We need to soft-reset the cpu when we turn off the cpu power domain,
	 * or else the other, still running cores might stall when this core
	 * is powered down.
	 */
	if (!IS_ERR(rstc) && !on)
		reset_control_assert(rstc);

	if (has_pmu) {
		ret = regmap_update_bits(pmu, PMU_PWRDN_CON, BIT(pd), val);
		if (ret < 0) {
			pr_err("%s: could not update power domain\n",
			       __func__);
			return ret;
		}

		ret = -1;
		while (ret != on) {
			ret = pmu_power_domain_is_on(pd);
			if (ret < 0) {
				pr_err("%s: could not read power domain state\n",
				       __func__);
				return ret;
			}
		}
	}

	if (!IS_ERR(rstc)) {
		if (on)
			reset_control_deassert(rstc);
		reset_control_put(rstc);
	}

	return 0;
}

/*
 * Handling of CPU cores
 */

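/*
 * Bringup of the secondary cores differs by core type: on the Cortex-A9
 * SoCs (e.g. rk3066/rk3188) the cores are released through the sram
 * trampoline set up in rockchip_smp_prepare_sram(), while on newer SoCs
 * (e.g. rk3288) the bootrom parks the secondaries in wfe and they are
 * kicked via the sram mailbox written below.
 */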
static int rockchip_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	if (!sram_base_addr || (has_pmu && !pmu)) {
		pr_err("%s: sram or pmu missing for cpu boot\n", __func__);
		return -ENXIO;
	}

	if (cpu >= ncores) {
		pr_err("%s: cpu %d outside maximum number of cpus %d\n",
		       __func__, cpu, ncores);
		return -ENXIO;
	}

	/* start the core */
	ret = pmu_set_power_domain(0 + cpu, true);
	if (ret < 0)
		return ret;

	if (read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
		/*
		 * We communicate with the bootrom to activate the cpus other
		 * than cpu0. After a blob of initialization code they will
		 * stay in a wfe state; once they are woken up, they check
		 * the mailbox:
		 * sram_base_addr + 4: 0xdeadbeaf
		 * sram_base_addr + 8: start address for pc
		 * Cpu0 needs to wait until the other cpus have entered the
		 * wfe state. The wait time is affected by many aspects
		 * (e.g. cpu frequency, bootrom frequency, sram frequency, ...).
		 */
		mdelay(1); /* give the other cpus time to reach wfe */

		writel(__pa_symbol(secondary_startup), sram_base_addr + 8);
		writel(0xDEADBEAF, sram_base_addr + 4);
		dsb_sev();
	}

	return 0;
}

/**
 * rockchip_smp_prepare_sram - populate necessary sram block
 * Starting cores execute the code residing at the start of the on-chip sram
 * after power-on. Therefore make sure this sram region is reserved and
 * big enough. After this check, copy the trampoline code that directs the
 * core to the real startup code in ram into the sram-region.
 * @node: mmio-sram device node
 */
static int __init rockchip_smp_prepare_sram(struct device_node *node)
{
	unsigned int trampoline_sz = &rockchip_secondary_trampoline_end -
				     &rockchip_secondary_trampoline;
	struct resource res;
	unsigned int rsize;
	int ret;

	ret = of_address_to_resource(node, 0, &res);
	if (ret < 0) {
		pr_err("%s: could not get address for node %pOF\n",
		       __func__, node);
		return ret;
	}

	rsize = resource_size(&res);
	if (rsize < trampoline_sz) {
		pr_err("%s: reserved block with size 0x%x is too small for trampoline size 0x%x\n",
		       __func__, rsize, trampoline_sz);
		return -EINVAL;
	}

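	/*
	 * rockchip_boot_fn is a data word embedded in the trampoline (see
	 * headsmp.S / core.h): the trampoline essentially just loads it into
	 * pc, so a freshly powered core that starts executing at the
	 * beginning of the sram jumps straight to secondary_startup in DRAM.
	 */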
	/* set the boot function for the sram code */
	rockchip_boot_fn = __pa_symbol(secondary_startup);

	/* copy the trampoline to sram; it runs during startup of the core */
	memcpy_toio(sram_base_addr, &rockchip_secondary_trampoline, trampoline_sz);
	flush_cache_all();
	outer_clean_range(0, trampoline_sz);

	dsb_sev();

	return 0;
}

static const struct regmap_config rockchip_pmu_regmap_config = {
	.name = "rockchip-pmu",
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

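/*
 * Look up the PMU registers, trying in order: a "rockchip,pmu" phandle on
 * the /cpus node, the "rockchip,rk3066-pmu" syscon, and finally a private
 * MMIO regmap created directly from the pmu node as a fallback.
 */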
static int __init rockchip_smp_prepare_pmu(void)
{
	struct device_node *node;
	void __iomem *pmu_base;

	/*
	 * This function is only called via smp_ops->smp_prepare_cpus().
	 * That only happens if a "/cpus" device tree node exists
	 * and has an "enable-method" property that selects the SMP
	 * operations defined herein.
	 */
	node = of_find_node_by_path("/cpus");

	pmu = syscon_regmap_lookup_by_phandle(node, "rockchip,pmu");
	of_node_put(node);
	if (!IS_ERR(pmu))
		return 0;

	pmu = syscon_regmap_lookup_by_compatible("rockchip,rk3066-pmu");
	if (!IS_ERR(pmu))
		return 0;

	/* fallback, create our own regmap for the pmu area */
	pmu = NULL;
	node = of_find_compatible_node(NULL, NULL, "rockchip,rk3066-pmu");
	if (!node) {
		pr_err("%s: could not find pmu dt node\n", __func__);
		return -ENODEV;
	}

	pmu_base = of_iomap(node, 0);
	of_node_put(node);
	if (!pmu_base) {
		pr_err("%s: could not map pmu registers\n", __func__);
		return -ENOMEM;
	}

	pmu = regmap_init_mmio(NULL, pmu_base, &rockchip_pmu_regmap_config);
	if (IS_ERR(pmu)) {
		int ret = PTR_ERR(pmu);

		iounmap(pmu_base);
		pmu = NULL;
		pr_err("%s: regmap init failed\n", __func__);
		return ret;
	}

	return 0;
}
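
/*
 * Illustrative (not taken verbatim from any particular dts) device-tree
 * layout this code expects:
 *
 *	cpus {
 *		enable-method = "rockchip,rk3066-smp";
 *		rockchip,pmu = <&pmu>;
 *		...
 *	};
 *
 * plus a "rockchip,rk3066-smp-sram" mmio-sram region used for the bringup
 * trampoline/mailbox.
 */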

static void __init rockchip_smp_prepare_cpus(unsigned int max_cpus)
{
	struct device_node *node;
	unsigned int i;

	node = of_find_compatible_node(NULL, NULL, "rockchip,rk3066-smp-sram");
	if (!node) {
		pr_err("%s: could not find sram dt node\n", __func__);
		return;
	}

	sram_base_addr = of_iomap(node, 0);
	if (!sram_base_addr) {
		pr_err("%s: could not map sram registers\n", __func__);
		of_node_put(node);
		return;
	}

	if (has_pmu && rockchip_smp_prepare_pmu()) {
		of_node_put(node);
		return;
	}

	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) {
		if (rockchip_smp_prepare_sram(node)) {
			of_node_put(node);
			return;
		}

		/* enable the SCU power domain */
		pmu_set_power_domain(PMU_PWRDN_SCU, true);

		of_node_put(node);
		node = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
		if (!node) {
			pr_err("%s: missing scu\n", __func__);
			return;
		}

		scu_base_addr = of_iomap(node, 0);
		if (!scu_base_addr) {
			pr_err("%s: could not map scu registers\n", __func__);
			of_node_put(node);
			return;
		}

		/*
		 * While the number of cpus is gathered from dt, also get the
		 * number of cores from the scu to verify this value when
		 * booting the cores.
		 */
		ncores = scu_get_core_count(scu_base_addr);
		pr_err("%s: ncores %d\n", __func__, ncores);

		scu_enable(scu_base_addr);
	} else {
		unsigned int l2ctlr;

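		/*
		 * On the non-A9 parts there is no SCU; L2CTLR[25:24]
		 * ("number of processors minus 1") provides the core
		 * count instead.
		 */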
		asm ("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
		ncores = ((l2ctlr >> 24) & 0x3) + 1;
	}
	of_node_put(node);

	/* Make sure that all cores except the first are really off */
	for (i = 1; i < ncores; i++)
		pmu_set_power_domain(0 + i, false);
}

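/*
 * rk3036-style SoCs use the same bringup flow but have no PMU-managed
 * per-core power domains, so the PMU handling is skipped entirely.
 */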
static void __init rk3036_smp_prepare_cpus(unsigned int max_cpus)
{
	has_pmu = false;

	rockchip_smp_prepare_cpus(max_cpus);
}

#ifdef CONFIG_HOTPLUG_CPU
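/*
 * CPU hotplug: rockchip_cpu_die() runs on the cpu that is going down and
 * parks it in wfi after exiting coherency; rockchip_cpu_kill() then runs on
 * a surviving cpu and cuts the power domain, after a short delay to let the
 * dying cpu actually reach wfi.
 */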
static int rockchip_cpu_kill(unsigned int cpu)
{
	/*
	 * We need a delay here to ensure that the dying CPU can finish
	 * executing v7_coherency_exit() and reach the WFI/WFE state
	 * prior to having the power domain disabled.
	 */
	mdelay(1);

	pmu_set_power_domain(0 + cpu, false);
	return 1;
}

static void rockchip_cpu_die(unsigned int cpu)
{
	v7_exit_coherency_flush(louis);
	while (1)
		cpu_do_idle();
}
#endif

static const struct smp_operations rk3036_smp_ops __initconst = {
	.smp_prepare_cpus	= rk3036_smp_prepare_cpus,
	.smp_boot_secondary	= rockchip_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_kill		= rockchip_cpu_kill,
	.cpu_die		= rockchip_cpu_die,
#endif
};

static const struct smp_operations rockchip_smp_ops __initconst = {
	.smp_prepare_cpus	= rockchip_smp_prepare_cpus,
	.smp_boot_secondary	= rockchip_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_kill		= rockchip_cpu_kill,
	.cpu_die		= rockchip_cpu_die,
#endif
};

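/*
 * These declarations bind the smp_operations above to the corresponding
 * "enable-method" strings, so a cpus node using one of them picks up the
 * matching bringup/hotplug callbacks.
 */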
CPU_METHOD_OF_DECLARE(rk3036_smp, "rockchip,rk3036-smp", &rk3036_smp_ops);
CPU_METHOD_OF_DECLARE(rk3066_smp, "rockchip,rk3066-smp", &rockchip_smp_ops);