// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Chen-Yu Tsai
 *
 * Chen-Yu Tsai <wens@csie.org>
 *
 * Allwinner A80 CPUS clock driver
 *
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_address.h>

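/* Protects read-modify-write updates of the shared CPUS clock register */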
static DEFINE_SPINLOCK(sun9i_a80_cpus_lock);

/*
 * sun9i_a80_cpus_setup() - setup function for the A80 CPUS composite clock
 */

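/*
 * CPUS clock register layout, as encoded by the masks below:
 *  - bits [17:16]: clock source mux (parent index 3 selects PLL4)
 *  - bits [12:8]:  PLL4 pre-divider, divides by (field + 1), i.e. 1..32
 *  - bits [5:4]:   clock divider, divides by (field + 1), i.e. 1..4
 */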
#define SUN9I_CPUS_MAX_PARENTS		4
#define SUN9I_CPUS_MUX_PARENT_PLL4	3
#define SUN9I_CPUS_MUX_SHIFT		16
#define SUN9I_CPUS_MUX_MASK		GENMASK(17, 16)
#define SUN9I_CPUS_MUX_GET_PARENT(reg)	((reg & SUN9I_CPUS_MUX_MASK) >> \
						SUN9I_CPUS_MUX_SHIFT)

#define SUN9I_CPUS_DIV_SHIFT		4
#define SUN9I_CPUS_DIV_MASK		GENMASK(5, 4)
#define SUN9I_CPUS_DIV_GET(reg)		((reg & SUN9I_CPUS_DIV_MASK) >> \
						SUN9I_CPUS_DIV_SHIFT)
#define SUN9I_CPUS_DIV_SET(reg, div)	((reg & ~SUN9I_CPUS_DIV_MASK) | \
						(div << SUN9I_CPUS_DIV_SHIFT))
#define SUN9I_CPUS_PLL4_DIV_SHIFT	8
#define SUN9I_CPUS_PLL4_DIV_MASK	GENMASK(12, 8)
#define SUN9I_CPUS_PLL4_DIV_GET(reg)	((reg & SUN9I_CPUS_PLL4_DIV_MASK) >> \
						SUN9I_CPUS_PLL4_DIV_SHIFT)
#define SUN9I_CPUS_PLL4_DIV_SET(reg, div) ((reg & ~SUN9I_CPUS_PLL4_DIV_MASK) | \
						(div << SUN9I_CPUS_PLL4_DIV_SHIFT))

struct sun9i_a80_cpus_clk {
	struct clk_hw hw;
	void __iomem *reg;
};

#define to_sun9i_a80_cpus_clk(_hw) container_of(_hw, struct sun9i_a80_cpus_clk, hw)

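/*
 * Report the current rate: the PLL4 pre-divider only applies when PLL4 is
 * the selected parent, after which the common divider always applies.
 */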
static unsigned long sun9i_a80_cpus_clk_recalc_rate(struct clk_hw *hw,
						    unsigned long parent_rate)
{
	struct sun9i_a80_cpus_clk *cpus = to_sun9i_a80_cpus_clk(hw);
	unsigned long rate;
	u32 reg;

	/* Fetch the register value */
	reg = readl(cpus->reg);

	/* apply pre-divider first if parent is pll4 */
	if (SUN9I_CPUS_MUX_GET_PARENT(reg) == SUN9I_CPUS_MUX_PARENT_PLL4)
		parent_rate /= SUN9I_CPUS_PLL4_DIV_GET(reg) + 1;

	/* clk divider */
	rate = parent_rate / (SUN9I_CPUS_DIV_GET(reg) + 1);

	return rate;
}

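/*
 * Shared helper for determine_rate and set_rate: compute divider settings
 * for @rate (including the PLL4 pre-divider when @parent is PLL4) without
 * ever exceeding the request. The register-ready (value - 1) fields are
 * passed back through @divp and @pre_divp when requested, and the
 * resulting rate is returned.
 */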
static long sun9i_a80_cpus_clk_round(unsigned long rate, u8 *divp, u8 *pre_divp,
				     u8 parent, unsigned long parent_rate)
{
	u8 div, pre_div = 1;

	/*
	 * clock can only divide, so we will never be able to achieve
	 * frequencies higher than the parent frequency
	 */
	if (parent_rate && rate > parent_rate)
		rate = parent_rate;

	div = DIV_ROUND_UP(parent_rate, rate);

	/* calculate pre-divider if parent is pll4 */
	if (parent == SUN9I_CPUS_MUX_PARENT_PLL4 && div > 4) {
		/* pre-divider is 1 ~ 32 */
		if (div < 32) {
			pre_div = div;
			div = 1;
		} else if (div < 64) {
			pre_div = DIV_ROUND_UP(div, 2);
			div = 2;
		} else if (div < 96) {
			pre_div = DIV_ROUND_UP(div, 3);
			div = 3;
		} else {
			pre_div = DIV_ROUND_UP(div, 4);
			div = 4;
		}
	}

	/* we were asked to pass back divider values */
	if (divp) {
		*divp = div - 1;
		*pre_divp = pre_div - 1;
	}

	return parent_rate / pre_div / div;
}

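/*
 * Pick the parent whose divided-down rate gets closest to the request
 * without going above it.
 */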
static int sun9i_a80_cpus_clk_determine_rate(struct clk_hw *clk,
					     struct clk_rate_request *req)
{
	struct clk_hw *parent, *best_parent = NULL;
	int i, num_parents;
	unsigned long parent_rate, best = 0, child_rate, best_child_rate = 0;
	unsigned long rate = req->rate;

	/* find the parent that can help provide the fastest rate <= rate */
	num_parents = clk_hw_get_num_parents(clk);
	for (i = 0; i < num_parents; i++) {
		parent = clk_hw_get_parent_by_index(clk, i);
		if (!parent)
			continue;
		if (clk_hw_get_flags(clk) & CLK_SET_RATE_PARENT)
			parent_rate = clk_hw_round_rate(parent, rate);
		else
			parent_rate = clk_hw_get_rate(parent);

		child_rate = sun9i_a80_cpus_clk_round(rate, NULL, NULL, i,
						      parent_rate);

		if (child_rate <= rate && child_rate > best_child_rate) {
			best_parent = parent;
			best = parent_rate;
			best_child_rate = child_rate;
		}
	}

	if (!best_parent)
		return -EINVAL;

	req->best_parent_hw = best_parent;
	req->best_parent_rate = best;
	req->rate = best_child_rate;

	return 0;
}

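/*
 * Program the divider fields under the shared spinlock. The currently
 * selected parent is read back first so the PLL4 pre-divider is only
 * factored in when it actually applies.
 */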
static int sun9i_a80_cpus_clk_set_rate(struct clk_hw *hw, unsigned long rate,
				       unsigned long parent_rate)
{
	struct sun9i_a80_cpus_clk *cpus = to_sun9i_a80_cpus_clk(hw);
	unsigned long flags;
	u8 div, pre_div, parent;
	u32 reg;

	spin_lock_irqsave(&sun9i_a80_cpus_lock, flags);

	reg = readl(cpus->reg);

	/* need to know which parent is used to apply pre-divider */
	parent = SUN9I_CPUS_MUX_GET_PARENT(reg);
	sun9i_a80_cpus_clk_round(rate, &div, &pre_div, parent, parent_rate);

	reg = SUN9I_CPUS_DIV_SET(reg, div);
	reg = SUN9I_CPUS_PLL4_DIV_SET(reg, pre_div);
	writel(reg, cpus->reg);

	spin_unlock_irqrestore(&sun9i_a80_cpus_lock, flags);

	return 0;
}

static const struct clk_ops sun9i_a80_cpus_clk_ops = {
	.determine_rate	= sun9i_a80_cpus_clk_determine_rate,
	.recalc_rate	= sun9i_a80_cpus_clk_recalc_rate,
	.set_rate	= sun9i_a80_cpus_clk_set_rate,
};

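/*
 * OF setup: map the CPUS register, collect the parent clock names and
 * register a composite clock that pairs the generic mux with the divider
 * ops above.
 */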
static void sun9i_a80_cpus_setup(struct device_node *node)
{
	const char *clk_name = node->name;
	const char *parents[SUN9I_CPUS_MAX_PARENTS];
	struct resource res;
	struct sun9i_a80_cpus_clk *cpus;
	struct clk_mux *mux;
	struct clk *clk;
	int ret;

	cpus = kzalloc(sizeof(*cpus), GFP_KERNEL);
	if (!cpus)
		return;

	cpus->reg = of_io_request_and_map(node, 0, of_node_full_name(node));
	if (IS_ERR(cpus->reg))
		goto err_free_cpus;

	of_property_read_string(node, "clock-output-names", &clk_name);

	/* we have a mux, we will have >1 parents */
	ret = of_clk_parent_fill(node, parents, SUN9I_CPUS_MAX_PARENTS);

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		goto err_unmap;

	/* set up clock properties */
	mux->reg = cpus->reg;
	mux->shift = SUN9I_CPUS_MUX_SHIFT;
	/* un-shifted mask is what mux_clk expects */
	mux->mask = SUN9I_CPUS_MUX_MASK >> SUN9I_CPUS_MUX_SHIFT;
	mux->lock = &sun9i_a80_cpus_lock;

	clk = clk_register_composite(NULL, clk_name, parents, ret,
				     &mux->hw, &clk_mux_ops,
				     &cpus->hw, &sun9i_a80_cpus_clk_ops,
				     NULL, NULL, 0);
	if (IS_ERR(clk))
		goto err_free_mux;

	ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
	if (ret)
		goto err_unregister;

	return;

err_unregister:
	clk_unregister(clk);
err_free_mux:
	kfree(mux);
err_unmap:
	iounmap(cpus->reg);
	of_address_to_resource(node, 0, &res);
	release_mem_region(res.start, resource_size(&res));
err_free_cpus:
	kfree(cpus);
}
CLK_OF_DECLARE(sun9i_a80_cpus, "allwinner,sun9i-a80-cpus-clk",
	       sun9i_a80_cpus_setup);