xref: /openbmc/linux/arch/arm/mach-hisi/platmcpm.c (revision 4f3db074)
1 /*
2  * Copyright (c) 2013-2014 Linaro Ltd.
3  * Copyright (c) 2013-2014 Hisilicon Limited.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  */
9 #include <linux/delay.h>
10 #include <linux/io.h>
11 #include <linux/memblock.h>
12 #include <linux/of_address.h>
13 
14 #include <asm/cputype.h>
15 #include <asm/cp15.h>
16 #include <asm/mcpm.h>
17 
18 #include "core.h"
19 
/* bits definition in SC_CPU_RESET_REQ[x]/SC_CPU_RESET_DREQ[x]
 * 1 -- unreset; 0 -- reset
 * (macro arguments parenthesized so expression arguments expand safely)
 */
#define CORE_RESET_BIT(x)		(1 << (x))
#define NEON_RESET_BIT(x)		(1 << ((x) + 4))
#define CORE_DEBUG_RESET_BIT(x)		(1 << ((x) + 9))
#define CLUSTER_L2_RESET_BIT		(1 << 8)
#define CLUSTER_DEBUG_RESET_BIT		(1 << 13)

/*
 * bits definition in SC_CPU_RESET_STATUS[x]
 * 1 -- reset status; 0 -- unreset status
 */
#define CORE_RESET_STATUS(x)		(1 << (x))
#define NEON_RESET_STATUS(x)		(1 << ((x) + 4))
#define CORE_DEBUG_RESET_STATUS(x)	(1 << ((x) + 9))
#define CLUSTER_L2_RESET_STATUS		(1 << 8)
#define CLUSTER_DEBUG_RESET_STATUS	(1 << 13)
#define CORE_WFI_STATUS(x)		(1 << ((x) + 16))
#define CORE_WFE_STATUS(x)		(1 << ((x) + 20))
#define CORE_DEBUG_ACK(x)		(1 << ((x) + 24))

/* per-cluster system controller register offsets (x = cluster index) */
#define SC_CPU_RESET_REQ(x)		(0x520 + ((x) << 3))	/* reset */
#define SC_CPU_RESET_DREQ(x)		(0x524 + ((x) << 3))	/* unreset */
#define SC_CPU_RESET_STATUS(x)		(0x1520 + ((x) << 3))

/* fabric register offsets */
#define FAB_SF_MODE			0x0c
#define FAB_SF_INVLD			0x10

/* bits definition in FB_SF_INVLD */
#define FB_SF_INVLD_START		(1 << 8)

#define HIP04_MAX_CLUSTERS		4
#define HIP04_MAX_CPUS_PER_CLUSTER	4

/* polling interval and overall timeout used by wait_for_powerdown */
#define POLL_MSEC	10
#define TIMEOUT_MSEC	1000
57 
static void __iomem *sysctrl, *fabric;	/* mapped sysctrl / fabric registers */
/* per-CPU use count; 0 means the CPU is down (or going down) */
static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER];
static DEFINE_SPINLOCK(boot_lock);	/* serializes hip04_cpu_table and reset register accesses */
static u32 fabric_phys_addr;	/* physical fabric base, read by secondaries with MMU off */
/*
 * Values from the device tree "boot-method" property:
 * [0]: bootwrapper physical address
 * [1]: bootwrapper size
 * [2]: relocation address
 * [3]: relocation size
 */
static u32 hip04_boot_method[4];
69 
70 static bool hip04_cluster_is_down(unsigned int cluster)
71 {
72 	int i;
73 
74 	for (i = 0; i < HIP04_MAX_CPUS_PER_CLUSTER; i++)
75 		if (hip04_cpu_table[cluster][i])
76 			return false;
77 	return true;
78 }
79 
/*
 * Set (on != 0) or clear (on == 0) @cluster's bit in the fabric snoop
 * filter mode register, then spin until a read-back of FAB_SF_MODE
 * matches the value written.  BUG()s if called before the fabric
 * registers have been mapped by hip04_mcpm_init().
 */
static void hip04_set_snoop_filter(unsigned int cluster, unsigned int on)
{
	unsigned long data;

	if (!fabric)
		BUG();
	data = readl_relaxed(fabric + FAB_SF_MODE);
	if (on)
		data |= 1 << cluster;
	else
		data &= ~(1 << cluster);
	writel_relaxed(data, fabric + FAB_SF_MODE);
	/* Wait until the fabric reflects the value we just wrote. */
	do {
		cpu_relax();
	} while (data != readl_relaxed(fabric + FAB_SF_MODE));
}
96 
/*
 * MCPM power_up handler: release @cpu in @cluster from reset so it can
 * enter the kernel through mcpm_entry_point.
 *
 * Returns 0 on success, -ENODEV if the sysctrl registers are not mapped
 * yet, or -EINVAL for an out-of-range cpu/cluster.  The per-CPU use
 * count in hip04_cpu_table is incremented under boot_lock; if the CPU
 * is already counted as up, only the count is bumped and the hardware
 * is left untouched.
 */
static int hip04_mcpm_power_up(unsigned int cpu, unsigned int cluster)
{
	unsigned long data;
	void __iomem *sys_dreq, *sys_status;

	if (!sysctrl)
		return -ENODEV;
	if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
		return -EINVAL;

	spin_lock_irq(&boot_lock);

	if (hip04_cpu_table[cluster][cpu])
		goto out;

	sys_dreq = sysctrl + SC_CPU_RESET_DREQ(cluster);
	sys_status = sysctrl + SC_CPU_RESET_STATUS(cluster);
	if (hip04_cluster_is_down(cluster)) {
		/* First CPU of the cluster: deassert cluster debug reset. */
		data = CLUSTER_DEBUG_RESET_BIT;
		writel_relaxed(data, sys_dreq);
		do {
			cpu_relax();
			data = readl_relaxed(sys_status);
		} while (data & CLUSTER_DEBUG_RESET_STATUS);
	}

	/* Deassert core, NEON and debug resets for this CPU. */
	data = CORE_RESET_BIT(cpu) | NEON_RESET_BIT(cpu) | \
	       CORE_DEBUG_RESET_BIT(cpu);
	writel_relaxed(data, sys_dreq);
	/* Spin until the status register no longer matches the mask written. */
	do {
		cpu_relax();
	} while (data == readl_relaxed(sys_status));
	/*
	 * We may fail to power up core again without this delay.
	 * It's not mentioned in document. It's found by test.
	 */
	udelay(20);
out:
	hip04_cpu_table[cluster][cpu]++;
	spin_unlock_irq(&boot_lock);

	return 0;
}
140 
/*
 * MCPM power_down handler, executed on the CPU that is going down.
 *
 * Decrements this CPU's use count under boot_lock.  If the count is
 * still non-zero afterwards, a power_up request raced ahead of us, so
 * the final WFI is skipped and the CPU returns to the kernel.  If this
 * CPU is the last one up in its cluster and it wins the MCPM outbound
 * critical section, it flushes the whole cache hierarchy and disables
 * the cluster's snoop filter; otherwise only its own cache level(s)
 * are flushed.
 */
static void hip04_mcpm_power_down(void)
{
	unsigned int mpidr, cpu, cluster;
	bool skip_wfi = false, last_man = false;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	__mcpm_cpu_going_down(cpu, cluster);

	spin_lock(&boot_lock);
	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
	hip04_cpu_table[cluster][cpu]--;
	if (hip04_cpu_table[cluster][cpu] == 1) {
		/* A power_up request went ahead of us. */
		skip_wfi = true;
	} else if (hip04_cpu_table[cluster][cpu] > 1) {
		pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
		BUG();
	}

	/* True when no CPU in this cluster is counted as up any more. */
	last_man = hip04_cluster_is_down(cluster);
	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
		spin_unlock(&boot_lock);
		/* Since it's Cortex A15, disable L2 prefetching. */
		asm volatile(
		"mcr	p15, 1, %0, c15, c0, 3 \n\t"
		"isb	\n\t"
		"dsb	"
		: : "r" (0x400) );
		v7_exit_coherency_flush(all);
		hip04_set_snoop_filter(cluster, 0);
		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	} else {
		spin_unlock(&boot_lock);
		v7_exit_coherency_flush(louis);
	}

	__mcpm_cpu_down(cpu, cluster);

	if (!skip_wfi)
		wfi();
}
185 
/*
 * MCPM wait_for_powerdown handler: poll until @cpu in @cluster reports
 * WFI in its reset status register, then assert its core/NEON/debug
 * resets and wait until the reset status confirms.
 *
 * Returns 0 on success, -EBUSY if the CPU was brought back up while we
 * were waiting, or -ETIMEDOUT once TIMEOUT_MSEC of polling elapses.
 */
static int hip04_mcpm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
{
	unsigned int data, tries, count;
	int ret = -ETIMEDOUT;

	BUG_ON(cluster >= HIP04_MAX_CLUSTERS ||
	       cpu >= HIP04_MAX_CPUS_PER_CLUSTER);

	count = TIMEOUT_MSEC / POLL_MSEC;
	spin_lock_irq(&boot_lock);
	for (tries = 0; tries < count; tries++) {
		if (hip04_cpu_table[cluster][cpu]) {
			ret = -EBUSY;
			goto err;
		}
		cpu_relax();
		data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
		if (data & CORE_WFI_STATUS(cpu))
			break;
		/* Drop the lock while sleeping; re-check the table after. */
		spin_unlock_irq(&boot_lock);
		/* Wait for clean L2 when the whole cluster is down. */
		msleep(POLL_MSEC);
		spin_lock_irq(&boot_lock);
	}
	if (tries >= count)
		goto err;
	/* CPU is in WFI: assert its core, NEON and debug resets. */
	data = CORE_RESET_BIT(cpu) | NEON_RESET_BIT(cpu) | \
	       CORE_DEBUG_RESET_BIT(cpu);
	writel_relaxed(data, sysctrl + SC_CPU_RESET_REQ(cluster));
	/* Busy-wait for the core reset status bit to latch. */
	for (tries = 0; tries < count; tries++) {
		cpu_relax();
		data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
		if (data & CORE_RESET_STATUS(cpu))
			break;
	}
	if (tries >= count)
		goto err;
	spin_unlock_irq(&boot_lock);
	return 0;
err:
	spin_unlock_irq(&boot_lock);
	return ret;
}
229 
230 static void hip04_mcpm_powered_up(void)
231 {
232 	unsigned int mpidr, cpu, cluster;
233 
234 	mpidr = read_cpuid_mpidr();
235 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
236 	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
237 
238 	spin_lock(&boot_lock);
239 	if (!hip04_cpu_table[cluster][cpu])
240 		hip04_cpu_table[cluster][cpu] = 1;
241 	spin_unlock(&boot_lock);
242 }
243 
/*
 * MCPM power_up_setup callback, entered from the MCPM entry code with
 * the MMU and caches off — hence __naked and pure, position-independent
 * assembly.  r0 holds the affinity level being brought up: level 0
 * (CPU) returns immediately; level 1 (cluster) turns on this cluster's
 * bit in the fabric snoop filter and spins until the fabric reflects it.
 */
static void __naked hip04_mcpm_power_up_setup(unsigned int affinity_level)
{
	asm volatile ("			\n"
"	cmp	r0, #0			\n"
"	bxeq	lr			\n"
	/* calculate fabric phys address */
	/*
	 * The literal pool at 2: holds its own link-time address and the
	 * address of fabric_phys_addr; r2 - r1 is the run-vs-link offset,
	 * so [r0, r3] loads fabric_phys_addr through physical addressing.
	 */
"	adr	r2, 2f			\n"
"	ldmia	r2, {r1, r3}		\n"
"	sub	r0, r2, r1		\n"
"	ldr	r2, [r0, r3]		\n"
	/* get cluster id from MPIDR */
"	mrc	p15, 0, r0, c0, c0, 5	\n"
"	ubfx	r1, r0, #8, #8		\n"
	/* 1 << cluster id */
"	mov	r0, #1			\n"
"	mov	r3, r0, lsl r1		\n"
	/* already enabled?  then nothing to do */
"	ldr	r0, [r2, #"__stringify(FAB_SF_MODE)"]	\n"
"	tst	r0, r3			\n"
"	bxne	lr			\n"
	/* set our bit and wait until the fabric reports it enabled */
"	orr	r1, r0, r3		\n"
"	str	r1, [r2, #"__stringify(FAB_SF_MODE)"]	\n"
"1:	ldr	r0, [r2, #"__stringify(FAB_SF_MODE)"]	\n"
"	tst	r0, r3			\n"
"	beq	1b			\n"
"	bx	lr			\n"

"	.align	2			\n"
"2:	.word	.			\n"
"	.word	fabric_phys_addr	\n"
	);
}
275 
/* MCPM backend operations registered with mcpm_platform_register(). */
static const struct mcpm_platform_ops hip04_mcpm_ops = {
	.power_up		= hip04_mcpm_power_up,
	.power_down		= hip04_mcpm_power_down,
	.wait_for_powerdown	= hip04_mcpm_wait_for_powerdown,
	.powered_up		= hip04_mcpm_powered_up,
};
282 
283 static bool __init hip04_cpu_table_init(void)
284 {
285 	unsigned int mpidr, cpu, cluster;
286 
287 	mpidr = read_cpuid_mpidr();
288 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
289 	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
290 
291 	if (cluster >= HIP04_MAX_CLUSTERS ||
292 	    cpu >= HIP04_MAX_CPUS_PER_CLUSTER) {
293 		pr_err("%s: boot CPU is out of bound!\n", __func__);
294 		return false;
295 	}
296 	hip04_set_snoop_filter(cluster, 1);
297 	hip04_cpu_table[cluster][cpu] = 1;
298 	return true;
299 }
300 
301 static int __init hip04_mcpm_init(void)
302 {
303 	struct device_node *np, *np_sctl, *np_fab;
304 	struct resource fab_res;
305 	void __iomem *relocation;
306 	int ret = -ENODEV;
307 
308 	np = of_find_compatible_node(NULL, NULL, "hisilicon,hip04-bootwrapper");
309 	if (!np)
310 		goto err;
311 	ret = of_property_read_u32_array(np, "boot-method",
312 					 &hip04_boot_method[0], 4);
313 	if (ret)
314 		goto err;
315 	np_sctl = of_find_compatible_node(NULL, NULL, "hisilicon,sysctrl");
316 	if (!np_sctl)
317 		goto err;
318 	np_fab = of_find_compatible_node(NULL, NULL, "hisilicon,hip04-fabric");
319 	if (!np_fab)
320 		goto err;
321 
322 	ret = memblock_reserve(hip04_boot_method[0], hip04_boot_method[1]);
323 	if (ret)
324 		goto err;
325 
326 	relocation = ioremap(hip04_boot_method[2], hip04_boot_method[3]);
327 	if (!relocation) {
328 		pr_err("failed to map relocation space\n");
329 		ret = -ENOMEM;
330 		goto err_reloc;
331 	}
332 	sysctrl = of_iomap(np_sctl, 0);
333 	if (!sysctrl) {
334 		pr_err("failed to get sysctrl base\n");
335 		ret = -ENOMEM;
336 		goto err_sysctrl;
337 	}
338 	ret = of_address_to_resource(np_fab, 0, &fab_res);
339 	if (ret) {
340 		pr_err("failed to get fabric base phys\n");
341 		goto err_fabric;
342 	}
343 	fabric_phys_addr = fab_res.start;
344 	sync_cache_w(&fabric_phys_addr);
345 	fabric = of_iomap(np_fab, 0);
346 	if (!fabric) {
347 		pr_err("failed to get fabric base\n");
348 		ret = -ENOMEM;
349 		goto err_fabric;
350 	}
351 
352 	if (!hip04_cpu_table_init()) {
353 		ret = -EINVAL;
354 		goto err_table;
355 	}
356 	ret = mcpm_platform_register(&hip04_mcpm_ops);
357 	if (ret) {
358 		goto err_table;
359 	}
360 
361 	/*
362 	 * Fill the instruction address that is used after secondary core
363 	 * out of reset.
364 	 */
365 	writel_relaxed(hip04_boot_method[0], relocation);
366 	writel_relaxed(0xa5a5a5a5, relocation + 4);	/* magic number */
367 	writel_relaxed(virt_to_phys(mcpm_entry_point), relocation + 8);
368 	writel_relaxed(0, relocation + 12);
369 	iounmap(relocation);
370 
371 	mcpm_sync_init(hip04_mcpm_power_up_setup);
372 	mcpm_smp_set_ops();
373 	pr_info("HiP04 MCPM initialized\n");
374 	return ret;
375 err_table:
376 	iounmap(fabric);
377 err_fabric:
378 	iounmap(sysctrl);
379 err_sysctrl:
380 	iounmap(relocation);
381 err_reloc:
382 	memblock_free(hip04_boot_method[0], hip04_boot_method[1]);
383 err:
384 	return ret;
385 }
386 early_initcall(hip04_mcpm_init);
387