xref: /openbmc/linux/arch/arm/mach-versatile/dcscb.c (revision d7445676e86900f8dc363825033ff62416c216e0)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * dcscb.c - Dual Cluster System Configuration Block
 *
 * Created by:	Nicolas Pitre, May 2012
 * Copyright:	(C) 2012-2013  Linaro Limited
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/of_address.h>
#include <linux/vexpress.h>
#include <linux/arm-cci.h>

#include <asm/mcpm.h>
#include <asm/proc-fns.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>

#include "vexpress.h"

#define RST_HOLD0	0x0
#define RST_HOLD1	0x4
#define SYS_SWRESET	0x8
#define RST_STAT0	0xc
#define RST_STAT1	0x10
#define EAG_CFG_R	0x20
#define EAG_CFG_W	0x24
#define KFC_CFG_R	0x28
#define KFC_CFG_W	0x2c
#define DCS_CFG_R	0x30
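/*
 * Register layout notes, inferred from how the offsets are used below
 * rather than from the DCSCB documentation: RST_HOLD0/RST_HOLD1 are the
 * per-cluster reset hold registers, addressed as RST_HOLD0 + cluster * 4.
 * Within each, bit 8 is the cluster-wide reset and bits (cpu) and
 * (cpu + 4) are the two per-CPU reset hold lines.  DCS_CFG_R reports the
 * number of cores in cluster N in the 4-bit field at bit (16 + N * 4);
 * dcscb_init() below turns those counts into the dcscb_allcpus_mask[]
 * bitmasks of CPUs present in each cluster.
 */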

static void __iomem *dcscb_base;
static int dcscb_allcpus_mask[2];

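/*
 * Power up one CPU: clear both of its reset hold bits so the core is
 * released from reset.  Which field is the power-on reset and which is
 * the core reset is not distinguished here; the code simply clears both.
 */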
static int dcscb_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
	unsigned int rst_hold, cpumask = (1 << cpu);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cluster >= 2 || !(cpumask & dcscb_allcpus_mask[cluster]))
		return -EINVAL;

	rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
	rst_hold &= ~(cpumask | (cpumask << 4));
	writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
	return 0;
}

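/*
 * Power up a cluster: release the cluster-wide reset (bit 8) while
 * asserting every individual CPU's reset hold, so cores are then brought
 * out of reset one at a time via dcscb_cpu_powerup().
 */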
static int dcscb_cluster_powerup(unsigned int cluster)
{
	unsigned int rst_hold;

	pr_debug("%s: cluster %u\n", __func__, cluster);
	if (cluster >= 2)
		return -EINVAL;

	/* remove cluster reset and add individual CPU's reset */
	rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
	rst_hold &= ~(1 << 8);
	rst_hold |= dcscb_allcpus_mask[cluster];
	writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
	return 0;
}

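/*
 * Prepare one CPU for power down: assert its reset hold so the core is
 * caught by reset after it has disabled its cache and executed WFI
 * (that the reset only takes effect once the core is in WFI is an
 * assumption based on the MCPM power-down protocol, not something
 * stated in this file).
 */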
static void dcscb_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
{
	unsigned int rst_hold;

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= 2 || !((1 << cpu) & dcscb_allcpus_mask[cluster]));

	rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
	rst_hold |= (1 << cpu);
	writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
}

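/* Prepare a cluster for power down: assert the cluster-wide reset hold. */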
static void dcscb_cluster_powerdown_prepare(unsigned int cluster)
{
	unsigned int rst_hold;

	pr_debug("%s: cluster %u\n", __func__, cluster);
	BUG_ON(cluster >= 2);

	rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
	rst_hold |= (1 << 8);
	writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
}

static void dcscb_cpu_cache_disable(void)
{
	/* Disable and flush the local CPU cache. */
	v7_exit_coherency_flush(louis);
}

static void dcscb_cluster_cache_disable(void)
{
	/* Flush all cache levels for this cluster. */
	v7_exit_coherency_flush(all);

	/*
	 * A full outer cache flush could be needed at this point
	 * on platforms with such a cache, depending on where the
	 * outer cache sits. In some cases the notion of a "last
	 * cluster standing" would need to be implemented if the
	 * outer cache is shared across clusters. In any case, when
	 * the outer cache needs flushing, there is no concurrent
	 * access to the cache controller to worry about and no
	 * special locking besides what is already provided by the
	 * MCPM state machinery is needed.
	 */

	/*
	 * Disable cluster-level coherency by masking
	 * incoming snoops and DVM messages:
	 */
	cci_disable_port_by_cpu(read_cpuid_mpidr());
}

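/* MCPM backend operations implemented by the DCSCB. */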
static const struct mcpm_platform_ops dcscb_power_ops = {
	.cpu_powerup		= dcscb_cpu_powerup,
	.cluster_powerup	= dcscb_cluster_powerup,
	.cpu_powerdown_prepare	= dcscb_cpu_powerdown_prepare,
	.cluster_powerdown_prepare = dcscb_cluster_powerdown_prepare,
	.cpu_cache_disable	= dcscb_cpu_cache_disable,
	.cluster_cache_disable	= dcscb_cluster_cache_disable,
};

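/*
 * Early power-up setup hook run by the MCPM entry code on an incoming
 * CPU, before its cache and coherency are enabled; it is implemented in
 * assembly (in dcscb_setup.S) and, at cluster level, presumably
 * re-enables the CCI port that dcscb_cluster_cache_disable() turned off.
 */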
extern void dcscb_power_up_setup(unsigned int affinity_level);

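/*
 * Probe: require the CCI to have been probed first, locate the DCSCB
 * node, map its registers, compute the per-cluster CPU masks and
 * register the MCPM backend.
 */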
static int __init dcscb_init(void)
{
	struct device_node *node;
	unsigned int cfg;
	int ret;

	if (!cci_probed())
		return -ENODEV;

	node = of_find_compatible_node(NULL, NULL, "arm,rtsm,dcscb");
	if (!node)
		return -ENODEV;
	dcscb_base = of_iomap(node, 0);
	if (!dcscb_base)
		return -EADDRNOTAVAIL;
	cfg = readl_relaxed(dcscb_base + DCS_CFG_R);
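	/*
	 * The 4-bit field at bit (16 + cluster * 4) of DCS_CFG_R gives
	 * the number of cores n in that cluster; (1 << n) - 1 is then
	 * the mask of all CPUs present.
	 */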
	dcscb_allcpus_mask[0] = (1 << (((cfg >> 16) >> (0 << 2)) & 0xf)) - 1;
	dcscb_allcpus_mask[1] = (1 << (((cfg >> 16) >> (1 << 2)) & 0xf)) - 1;

	ret = mcpm_platform_register(&dcscb_power_ops);
	if (!ret)
		ret = mcpm_sync_init(dcscb_power_up_setup);
	if (ret) {
		iounmap(dcscb_base);
		return ret;
	}

	pr_info("VExpress DCSCB support installed\n");

	/*
	 * Future entries into the kernel can now go
	 * through the cluster entry vectors.
	 */
	vexpress_flags_set(__pa_symbol(mcpm_entry_point));

	return 0;
}

early_initcall(dcscb_init);