xref: /openbmc/linux/drivers/clk/mvebu/clk-cpu.c (revision d0b73b48)
1 /*
2  * Marvell MVEBU CPU clock handling.
3  *
4  * Copyright (C) 2012 Marvell
5  *
6  * Gregory CLEMENT <gregory.clement@free-electrons.com>
7  *
8  * This file is licensed under the terms of the GNU General Public
9  * License version 2.  This program is licensed "as is" without any
10  * warranty of any kind, whether express or implied.
11  */
12 #include <linux/kernel.h>
13 #include <linux/clkdev.h>
14 #include <linux/clk-provider.h>
15 #include <linux/of_address.h>
16 #include <linux/io.h>
17 #include <linux/of.h>
18 #include <linux/delay.h>
19 #include "clk-cpu.h"
20 
/* System-controller clock-complex registers (offsets from reg_base) */
#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET    0x0
#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET   0xC
#define SYS_CTRL_CLK_DIVIDER_MASK	    0x3F	/* 6-bit divider field */

/* Size advertised to the onecell provider; per-CPU fields are 8 bits apart */
#define MAX_CPU	    4
/* One instance per CPU clock; hw is the common-clock-framework handle */
struct cpu_clk {
	struct clk_hw hw;
	int cpu;			/* CPU index, selects the divider field */
	const char *clk_name;		/* "cpuN", heap-allocated, owned here */
	const char *parent_name;	/* name of the parent clock */
	void __iomem *reg_base;		/* mapped clock-complex registers */
};

/* Array of registered per-CPU clocks, indexed by CPU number */
static struct clk **clks;

/* Provider data handed to of_clk_add_provider() */
static struct clk_onecell_data clk_data;

#define to_cpu_clk(p) container_of(p, struct cpu_clk, hw)
39 
40 static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk,
41 					 unsigned long parent_rate)
42 {
43 	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
44 	u32 reg, div;
45 
46 	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
47 	div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK;
48 	return parent_rate / div;
49 }
50 
/*
 * Round a requested rate to the nearest achievable one at or above
 * parent_rate / 3.  The hardware only supports dividers 1, 2 and 3,
 * so clamp the integer ratio into that range.
 */
static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
			       unsigned long *parent_rate)
{
	unsigned int ratio = *parent_rate / rate;

	if (ratio < 1)
		ratio = 1;
	if (ratio > 3)
		ratio = 3;

	return *parent_rate / ratio;
}
65 
/*
 * Program a new divider for one CPU clock.
 *
 * Sequence: write the divider field, arm the per-CPU "reload smooth"
 * bit, pulse the global reload trigger (bit 24), wait for the clock to
 * settle, then clear both control bits.  The statement order is part of
 * the hardware protocol — do not reorder.  Always returns 0.
 */
static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
			    unsigned long parent_rate)
{
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
	u32 reg, div;
	u32 reload_mask;

	/* Replace only this CPU's 6-bit field (one byte per CPU). */
	div = parent_rate / rate;
	reg = (readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET)
		& (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8))))
		| (div << (cpuclk->cpu * 8));
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
	/* Set clock divider reload smooth bit mask (bits 20..23, per CPU) */
	reload_mask = 1 << (20 + cpuclk->cpu);

	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
	    | reload_mask;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Now trigger the clock update */
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
	    | 1 << 24;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Wait for clocks to settle down then clear reload request */
	udelay(1000);
	reg &= ~(reload_mask | 1 << 24);
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
	udelay(1000);

	return 0;
}
98 
/* Only rate operations are provided; there is no gate for CPU clocks. */
static const struct clk_ops cpu_ops = {
	.recalc_rate = clk_cpu_recalc_rate,
	.round_rate = clk_cpu_round_rate,
	.set_rate = clk_cpu_set_rate,
};
104 
105 void __init of_cpu_clk_setup(struct device_node *node)
106 {
107 	struct cpu_clk *cpuclk;
108 	void __iomem *clock_complex_base = of_iomap(node, 0);
109 	int ncpus = 0;
110 	struct device_node *dn;
111 
112 	if (clock_complex_base == NULL) {
113 		pr_err("%s: clock-complex base register not set\n",
114 			__func__);
115 		return;
116 	}
117 
118 	for_each_node_by_type(dn, "cpu")
119 		ncpus++;
120 
121 	cpuclk = kzalloc(ncpus * sizeof(*cpuclk), GFP_KERNEL);
122 	if (WARN_ON(!cpuclk))
123 		return;
124 
125 	clks = kzalloc(ncpus * sizeof(*clks), GFP_KERNEL);
126 	if (WARN_ON(!clks))
127 		goto clks_out;
128 
129 	for_each_node_by_type(dn, "cpu") {
130 		struct clk_init_data init;
131 		struct clk *clk;
132 		struct clk *parent_clk;
133 		char *clk_name = kzalloc(5, GFP_KERNEL);
134 		int cpu, err;
135 
136 		if (WARN_ON(!clk_name))
137 			goto bail_out;
138 
139 		err = of_property_read_u32(dn, "reg", &cpu);
140 		if (WARN_ON(err))
141 			goto bail_out;
142 
143 		sprintf(clk_name, "cpu%d", cpu);
144 		parent_clk = of_clk_get(node, 0);
145 
146 		cpuclk[cpu].parent_name = __clk_get_name(parent_clk);
147 		cpuclk[cpu].clk_name = clk_name;
148 		cpuclk[cpu].cpu = cpu;
149 		cpuclk[cpu].reg_base = clock_complex_base;
150 		cpuclk[cpu].hw.init = &init;
151 
152 		init.name = cpuclk[cpu].clk_name;
153 		init.ops = &cpu_ops;
154 		init.flags = 0;
155 		init.parent_names = &cpuclk[cpu].parent_name;
156 		init.num_parents = 1;
157 
158 		clk = clk_register(NULL, &cpuclk[cpu].hw);
159 		if (WARN_ON(IS_ERR(clk)))
160 			goto bail_out;
161 		clks[cpu] = clk;
162 	}
163 	clk_data.clk_num = MAX_CPU;
164 	clk_data.clks = clks;
165 	of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);
166 
167 	return;
168 bail_out:
169 	kfree(clks);
170 	while(ncpus--)
171 		kfree(cpuclk[ncpus].clk_name);
172 clks_out:
173 	kfree(cpuclk);
174 }
175 
/*
 * of_clk_init() match table: of_cpu_clk_setup() runs for every
 * "marvell,armada-xp-cpu-clock" node in the device tree.
 */
static const __initconst struct of_device_id clk_cpu_match[] = {
	{
		.compatible = "marvell,armada-xp-cpu-clock",
		.data = of_cpu_clk_setup,
	},
	{
		/* sentinel */
	},
};
185 
/* Scan the device tree and set up CPU clocks for all matching nodes. */
void __init mvebu_cpu_clk_init(void)
{
	of_clk_init(clk_cpu_match);
}
190