// SPDX-License-Identifier: GPL-2.0-only
/*
 * Versatile Express Serial Power Controller (SPC) support
 *
 * Copyright (C) 2013 ARM Ltd.
 *
 * Authors: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
 *          Achin Gupta           <achin.gupta@arm.com>
 *          Lorenzo Pieralisi     <lorenzo.pieralisi@arm.com>
 */

#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/semaphore.h>

#include <asm/cacheflush.h>

#include "spc.h"

#define SPCLOG "vexpress-spc: "

#define PERF_LVL_A15		0x00
#define PERF_REQ_A15		0x04
#define PERF_LVL_A7		0x08
#define PERF_REQ_A7		0x0c
#define COMMS			0x10
#define COMMS_REQ		0x14
#define PWC_STATUS		0x18
#define PWC_FLAG		0x1c

/* SPC wake-up IRQs status and mask */
#define WAKE_INT_MASK		0x24
#define WAKE_INT_RAW		0x28
#define WAKE_INT_STAT		0x2c
/* SPC power down registers */
#define A15_PWRDN_EN		0x30
#define A7_PWRDN_EN		0x34
/* SPC per-CPU mailboxes */
#define A15_BX_ADDR0		0x68
#define A7_BX_ADDR0		0x78

/* SPC CPU/cluster reset status */
#define STANDBYWFI_STAT		0x3c
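/*
 * The A15 CPUs occupy the low bits of STANDBYWFI_STAT and the A7 CPUs
 * follow at a fixed offset of 3, matching TC2's 2xA15 + 3xA7 layout.
 */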
#define STANDBYWFI_STAT_A15_CPU_MASK(cpu)	(1 << (cpu))
#define STANDBYWFI_STAT_A7_CPU_MASK(cpu)	(1 << (3 + (cpu)))

/* SPC system config interface registers */
#define SYSCFG_WDATA		0x70
#define SYSCFG_RDATA		0x74

/* A15/A7 OPP virtual register base */
#define A15_PERFVAL_BASE	0xC10
#define A7_PERFVAL_BASE		0xC30

/* Config interface control bits */
#define SYSCFG_START		BIT(31)
#define SYSCFG_SCC		(6 << 20)
#define SYSCFG_STAT		(14 << 20)

/* wake-up interrupt masks */
#define GBL_WAKEUP_INT_MSK	(0x3 << 10)

/* TC2 static dual-cluster configuration */
#define MAX_CLUSTERS		2

/*
 * Even though the SPC takes at most 3-5 ms to complete any OPP/COMMS
 * operation, the operation could start just before the jiffy counter is
 * incremented, so set the timeout to 20 ms (2 jiffies at 100 Hz).
 */
#define TIMEOUT_US	20000

#define MAX_OPPS	8
#define CA15_DVFS	0
#define CA7_DVFS	1
#define SPC_SYS_CFG	2
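/*
 * Each request type above owns a 4-bit field in the response status:
 * bit 0 of the field signals completion, bit 1 signals an error.
 */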
#define STAT_COMPLETE(type)	((1 << 0) << (type << 2))
#define STAT_ERR(type)		((1 << 1) << (type << 2))
#define RESPONSE_MASK(type)	(STAT_COMPLETE(type) | STAT_ERR(type))

struct ve_spc_opp {
	unsigned long freq;
	unsigned long u_volt;
};

struct ve_spc_drvdata {
	void __iomem *baseaddr;
	/*
	 * A15 cluster identifier
	 * It corresponds to the A15 processors' MPIDR[15:8] bitfield
	 */
	u32 a15_clusid;
	uint32_t cur_rsp_mask;
	uint32_t cur_rsp_stat;
	struct semaphore sem;
	struct completion done;
	struct ve_spc_opp *opps[MAX_CLUSTERS];
	int num_opps[MAX_CLUSTERS];
};

static struct ve_spc_drvdata *info;

static inline bool cluster_is_a15(u32 cluster)
{
	return cluster == info->a15_clusid;
}

/**
 * ve_spc_global_wakeup_irq() - sets/clears global wakeup IRQs
 *
 * @set: if true, global wake-up IRQs are set, if false they are cleared
 *
 * Function to set/clear global wakeup IRQs. Not protected by locking since
 * it might be used in code paths where normal cacheable locks are not
 * working. Locking must be provided by the caller to ensure atomicity.
 */
void ve_spc_global_wakeup_irq(bool set)
{
	u32 reg;

	reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK);

	if (set)
		reg |= GBL_WAKEUP_INT_MSK;
	else
		reg &= ~GBL_WAKEUP_INT_MSK;

	writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK);
}

/**
 * ve_spc_cpu_wakeup_irq() - sets/clears per-CPU wake-up IRQs
 *
 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
 * @cpu: mpidr[7:0] bitfield describing cpu affinity level
 * @set: if true, wake-up IRQs are set, if false they are cleared
 *
 * Function to set/clear per-CPU wake-up IRQs. Not protected by locking since
 * it might be used in code paths where normal cacheable locks are not
 * working. Locking must be provided by the caller to ensure atomicity.
 */
void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set)
{
	u32 mask, reg;

	if (cluster >= MAX_CLUSTERS)
		return;

	mask = BIT(cpu);

	if (!cluster_is_a15(cluster))
		mask <<= 4;

	reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK);

	if (set)
		reg |= mask;
	else
		reg &= ~mask;

	writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK);
}

/**
 * ve_spc_set_resume_addr() - set the jump address used for warm boot
 *
 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
 * @cpu: mpidr[7:0] bitfield describing cpu affinity level
 * @addr: physical resume address
 */
void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr)
{
	void __iomem *baseaddr;

	if (cluster >= MAX_CLUSTERS)
		return;

	if (cluster_is_a15(cluster))
		baseaddr = info->baseaddr + A15_BX_ADDR0 + (cpu << 2);
	else
		baseaddr = info->baseaddr + A7_BX_ADDR0 + (cpu << 2);

	writel_relaxed(addr, baseaddr);
}

/**
 * ve_spc_powerdown() - enables/disables cluster powerdown
 *
 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
 * @enable: if true enables powerdown, if false disables it
 *
 * Function to enable/disable cluster powerdown. Not protected by locking
 * since it might be used in code paths where normal cacheable locks are not
 * working. Locking must be provided by the caller to ensure atomicity.
 */
void ve_spc_powerdown(u32 cluster, bool enable)
{
	u32 pwdrn_reg;

	if (cluster >= MAX_CLUSTERS)
		return;

	pwdrn_reg = cluster_is_a15(cluster) ? A15_PWRDN_EN : A7_PWRDN_EN;
	writel_relaxed(enable, info->baseaddr + pwdrn_reg);
}

static u32 standbywfi_cpu_mask(u32 cpu, u32 cluster)
{
	return cluster_is_a15(cluster) ?
		  STANDBYWFI_STAT_A15_CPU_MASK(cpu)
		: STANDBYWFI_STAT_A7_CPU_MASK(cpu);
}

/**
 * ve_spc_cpu_in_wfi() - Checks if the specified CPU is in WFI or not
 *
 * @cpu: mpidr[7:0] bitfield describing CPU affinity level within cluster
 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
 *
 * @return: non-zero if and only if the specified CPU is in WFI
 *
 * Take care when interpreting the result of this function: a CPU might
 * be in WFI temporarily due to idle, and is not necessarily safely
 * parked.
 */
int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster)
{
	int ret;
	u32 mask = standbywfi_cpu_mask(cpu, cluster);

	if (cluster >= MAX_CLUSTERS)
		return 1;

	ret = readl_relaxed(info->baseaddr + STANDBYWFI_STAT);

	pr_debug("%s: PCFGREG[0x%X] = 0x%08X, mask = 0x%X\n",
		 __func__, STANDBYWFI_STAT, ret, mask);

	return ret & mask;
}

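/*
 * The PERF_LVL_* registers hold the index of the currently programmed
 * OPP, so the value read back is used to index the per-cluster table
 * built by ve_spc_populate_opps().
 */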
static int ve_spc_get_performance(int cluster, u32 *freq)
{
	struct ve_spc_opp *opps = info->opps[cluster];
	u32 perf_cfg_reg = 0;
	u32 perf;

	perf_cfg_reg = cluster_is_a15(cluster) ? PERF_LVL_A15 : PERF_LVL_A7;

	perf = readl_relaxed(info->baseaddr + perf_cfg_reg);
	if (perf >= info->num_opps[cluster])
		return -EINVAL;

	opps += perf;
	*freq = opps->freq;

	return 0;
}

/* find closest match to given frequency in OPP table */
static int ve_spc_round_performance(int cluster, u32 freq)
{
	int idx, max_opp = info->num_opps[cluster];
	struct ve_spc_opp *opps = info->opps[cluster];
	u32 fmin = 0, fmax = ~0, ftmp;

	freq /= 1000; /* OPP entries in kHz */
	for (idx = 0; idx < max_opp; idx++, opps++) {
		ftmp = opps->freq;
		if (ftmp >= freq) {
			if (ftmp <= fmax)
				fmax = ftmp;
		} else {
			if (ftmp >= fmin)
				fmin = ftmp;
		}
	}
	if (fmax != ~0)
		return fmax * 1000;
	else
		return fmin * 1000;
}

static int ve_spc_find_performance_index(int cluster, u32 freq)
{
	int idx, max_opp = info->num_opps[cluster];
	struct ve_spc_opp *opps = info->opps[cluster];

	for (idx = 0; idx < max_opp; idx++, opps++)
		if (opps->freq == freq)
			break;
	return (idx == max_opp) ? -EINVAL : idx;
}

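/*
 * wait_for_completion_interruptible_timeout() returns 0 on timeout, a
 * negative value when interrupted and the remaining jiffies otherwise,
 * hence the three-way mapping below.
 */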
static int ve_spc_waitforcompletion(int req_type)
{
	int ret = wait_for_completion_interruptible_timeout(
			&info->done, usecs_to_jiffies(TIMEOUT_US));
	if (ret == 0)
		ret = -ETIMEDOUT;
	else if (ret > 0)
		ret = info->cur_rsp_stat & STAT_COMPLETE(req_type) ? 0 : -EIO;
	return ret;
}

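/*
 * DVFS and sys-config requests share one protocol: take the semaphore,
 * arm the completion with the expected response mask, kick the request
 * register, then wait for the SPC interrupt to signal completion.
 */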
static int ve_spc_set_performance(int cluster, u32 freq)
{
	u32 perf_cfg_reg;
	int ret, perf, req_type;

	if (cluster_is_a15(cluster)) {
		req_type = CA15_DVFS;
		perf_cfg_reg = PERF_LVL_A15;
	} else {
		req_type = CA7_DVFS;
		perf_cfg_reg = PERF_LVL_A7;
	}

	perf = ve_spc_find_performance_index(cluster, freq);

	if (perf < 0)
		return perf;

	if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US)))
		return -ETIME;

	init_completion(&info->done);
	info->cur_rsp_mask = RESPONSE_MASK(req_type);

	writel(perf, info->baseaddr + perf_cfg_reg);
	ret = ve_spc_waitforcompletion(req_type);

	info->cur_rsp_mask = 0;
	up(&info->sem);

	return ret;
}

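/*
 * A sys-config read is started by writing SYSCFG_START together with the
 * function code and the word offset (hence the >> 2) to the COMMS
 * register; on success the result is picked up from SYSCFG_RDATA.
 */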
static int ve_spc_read_sys_cfg(int func, int offset, uint32_t *data)
{
	int ret;

	if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US)))
		return -ETIME;

	init_completion(&info->done);
	info->cur_rsp_mask = RESPONSE_MASK(SPC_SYS_CFG);

	/* Set the control value */
	writel(SYSCFG_START | func | offset >> 2, info->baseaddr + COMMS);
	ret = ve_spc_waitforcompletion(SPC_SYS_CFG);

	if (ret == 0)
		*data = readl(info->baseaddr + SYSCFG_RDATA);

	info->cur_rsp_mask = 0;
	up(&info->sem);

	return ret;
}

static irqreturn_t ve_spc_irq_handler(int irq, void *data)
{
	struct ve_spc_drvdata *drv_data = data;
	uint32_t status = readl_relaxed(drv_data->baseaddr + PWC_STATUS);

	if (info->cur_rsp_mask & status) {
		info->cur_rsp_stat = status;
		complete(&drv_data->done);
	}

	return IRQ_HANDLED;
}

/*
 *  +--------------------------+
 *  | 31      20 | 19        0 |
 *  +--------------------------+
 *  |   m_volt   |  freq(kHz)  |
 *  +--------------------------+
 */
#define MULT_FACTOR	20
#define VOLT_SHIFT	20
#define FREQ_MASK	(0xFFFFF)
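/*
 * Example decode (hypothetical raw value): data = (900 << 20) | 50000
 * yields freq = 50000 * 20 = 1000000 kHz (1 GHz) and
 * u_volt = 900 * 1000 = 900000 uV (900 mV).
 */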
static int ve_spc_populate_opps(uint32_t cluster)
{
	uint32_t data = 0, off, ret, idx;
	struct ve_spc_opp *opps;

	opps = kcalloc(MAX_OPPS, sizeof(*opps), GFP_KERNEL);
	if (!opps)
		return -ENOMEM;

	info->opps[cluster] = opps;

	off = cluster_is_a15(cluster) ? A15_PERFVAL_BASE : A7_PERFVAL_BASE;
	for (idx = 0; idx < MAX_OPPS; idx++, off += 4, opps++) {
		ret = ve_spc_read_sys_cfg(SYSCFG_SCC, off, &data);
		if (!ret) {
			opps->freq = (data & FREQ_MASK) * MULT_FACTOR;
			opps->u_volt = (data >> VOLT_SHIFT) * 1000;
		} else {
			break;
		}
	}
	info->num_opps[cluster] = idx;

	return ret;
}

static int ve_init_opp_table(struct device *cpu_dev)
{
	int cluster;
	int idx, ret = 0, max_opp;
	struct ve_spc_opp *opps;

	cluster = topology_physical_package_id(cpu_dev->id);
	cluster = cluster < 0 ? 0 : cluster;

	max_opp = info->num_opps[cluster];
	opps = info->opps[cluster];

	for (idx = 0; idx < max_opp; idx++, opps++) {
		ret = dev_pm_opp_add(cpu_dev, opps->freq * 1000, opps->u_volt);
		if (ret) {
			dev_warn(cpu_dev, "failed to add opp %lu %lu\n",
				 opps->freq, opps->u_volt);
			return ret;
		}
	}
	return ret;
}

int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid, int irq)
{
	int ret;
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->baseaddr = baseaddr;
	info->a15_clusid = a15_clusid;

	if (irq <= 0) {
		pr_err(SPCLOG "Invalid IRQ %d\n", irq);
		kfree(info);
		return -EINVAL;
	}

	init_completion(&info->done);

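	/* Dummy read, presumably to drain stale status before the IRQ is hooked up */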
	readl_relaxed(info->baseaddr + PWC_STATUS);

	ret = request_irq(irq, ve_spc_irq_handler, IRQF_TRIGGER_HIGH
				| IRQF_ONESHOT, "vexpress-spc", info);
	if (ret) {
		pr_err(SPCLOG "IRQ %d request failed\n", irq);
		kfree(info);
		return -ENODEV;
	}

	sema_init(&info->sem, 1);
	/*
	 * Multi-cluster systems may need this data when non-coherent, during
	 * cluster power-up/power-down. Make sure driver info reaches main
	 * memory.
	 */
	sync_cache_w(info);
	sync_cache_w(&info);

	return 0;
}

struct clk_spc {
	struct clk_hw hw;
	int cluster;
};

#define to_clk_spc(spc) container_of(spc, struct clk_spc, hw)
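/*
 * The clk framework deals in Hz while the SPC OPP tables are kept in
 * kHz, hence the *1000 and /1000 conversions in the rate callbacks.
 */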
static unsigned long spc_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct clk_spc *spc = to_clk_spc(hw);
	u32 freq;

	if (ve_spc_get_performance(spc->cluster, &freq))
		return -EIO;

	return freq * 1000;
}

static long spc_round_rate(struct clk_hw *hw, unsigned long drate,
		unsigned long *parent_rate)
{
	struct clk_spc *spc = to_clk_spc(hw);

	return ve_spc_round_performance(spc->cluster, drate);
}

static int spc_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct clk_spc *spc = to_clk_spc(hw);

	return ve_spc_set_performance(spc->cluster, rate / 1000);
}

static struct clk_ops clk_spc_ops = {
	.recalc_rate = spc_recalc_rate,
	.round_rate = spc_round_rate,
	.set_rate = spc_set_rate,
};

static struct clk *ve_spc_clk_register(struct device *cpu_dev)
{
	struct clk_init_data init;
	struct clk_spc *spc;

	spc = kzalloc(sizeof(*spc), GFP_KERNEL);
	if (!spc)
		return ERR_PTR(-ENOMEM);

	spc->hw.init = &init;
	spc->cluster = topology_physical_package_id(cpu_dev->id);

	spc->cluster = spc->cluster < 0 ? 0 : spc->cluster;

	init.name = dev_name(cpu_dev);
	init.ops = &clk_spc_ops;
	init.flags = CLK_GET_RATE_NOCACHE;
	init.num_parents = 0;

	return devm_clk_register(cpu_dev, &spc->hw);
}

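/*
 * Register one clock per CPU, named after the CPU device so consumers
 * can look it up via clkdev, build the shared per-cluster OPP tables,
 * then spawn the vexpress-spc-cpufreq device that uses them.
 */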
static int __init ve_spc_clk_init(void)
{
	int cpu, cluster;
	struct clk *clk;
	bool init_opp_table[MAX_CLUSTERS] = { false };

	if (!info)
		return 0; /* Continue only if SPC is initialised */

	if (ve_spc_populate_opps(0) || ve_spc_populate_opps(1)) {
		pr_err("failed to build OPP table\n");
		return -ENODEV;
	}

	for_each_possible_cpu(cpu) {
		struct device *cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			pr_warn("failed to get cpu%d device\n", cpu);
			continue;
		}
		clk = ve_spc_clk_register(cpu_dev);
		if (IS_ERR(clk)) {
			pr_warn("failed to register cpu%d clock\n", cpu);
			continue;
		}
		if (clk_register_clkdev(clk, NULL, dev_name(cpu_dev))) {
			pr_warn("failed to register cpu%d clock lookup\n", cpu);
			continue;
		}

		cluster = topology_physical_package_id(cpu_dev->id);
		if (cluster < 0 || init_opp_table[cluster])
			continue;

		if (ve_init_opp_table(cpu_dev))
			pr_warn("failed to initialise cpu%d opp table\n", cpu);
		else if (dev_pm_opp_set_sharing_cpus(cpu_dev,
			 topology_core_cpumask(cpu_dev->id)))
			pr_warn("failed to mark OPPs shared for cpu%d\n", cpu);
		else
			init_opp_table[cluster] = true;
	}

	platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0);
	return 0;
}
device_initcall(ve_spc_clk_init);
591