// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Chen-Yu Tsai
 *
 * Chen-Yu Tsai <wens@csie.org>
 *
 * Allwinner A80 CPUS clock driver
 *
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_address.h>

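/* serializes read-modify-write of the CPUS register between the mux and divider ops */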
static DEFINE_SPINLOCK(sun9i_a80_cpus_lock);

/**
 * sun9i_a80_cpus_setup() - Setup function for the A80 CPUS composite clk
 */

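/*
 * CPUS clock configuration register layout, as used by this driver:
 *   bits [17:16]  parent mux (value 3 selects PLL4)
 *   bits [12:8]   PLL4 pre-divider: divides by (field value + 1), i.e. 1-32
 *   bits [5:4]    clock divider: divides by (field value + 1), i.e. 1-4
 */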
#define SUN9I_CPUS_MAX_PARENTS		4
#define SUN9I_CPUS_MUX_PARENT_PLL4	3
#define SUN9I_CPUS_MUX_SHIFT		16
#define SUN9I_CPUS_MUX_MASK		GENMASK(17, 16)
#define SUN9I_CPUS_MUX_GET_PARENT(reg)	((reg & SUN9I_CPUS_MUX_MASK) >> \
						SUN9I_CPUS_MUX_SHIFT)

#define SUN9I_CPUS_DIV_SHIFT		4
#define SUN9I_CPUS_DIV_MASK		GENMASK(5, 4)
#define SUN9I_CPUS_DIV_GET(reg)		((reg & SUN9I_CPUS_DIV_MASK) >> \
						SUN9I_CPUS_DIV_SHIFT)
#define SUN9I_CPUS_DIV_SET(reg, div)	((reg & ~SUN9I_CPUS_DIV_MASK) | \
						(div << SUN9I_CPUS_DIV_SHIFT))
#define SUN9I_CPUS_PLL4_DIV_SHIFT	8
#define SUN9I_CPUS_PLL4_DIV_MASK	GENMASK(12, 8)
#define SUN9I_CPUS_PLL4_DIV_GET(reg)	((reg & SUN9I_CPUS_PLL4_DIV_MASK) >> \
						SUN9I_CPUS_PLL4_DIV_SHIFT)
#define SUN9I_CPUS_PLL4_DIV_SET(reg, div) ((reg & ~SUN9I_CPUS_PLL4_DIV_MASK) | \
						(div << SUN9I_CPUS_PLL4_DIV_SHIFT))

struct sun9i_a80_cpus_clk {
	struct clk_hw hw;
	void __iomem *reg;
};

#define to_sun9i_a80_cpus_clk(_hw) container_of(_hw, struct sun9i_a80_cpus_clk, hw)

static unsigned long sun9i_a80_cpus_clk_recalc_rate(struct clk_hw *hw,
						    unsigned long parent_rate)
{
	struct sun9i_a80_cpus_clk *cpus = to_sun9i_a80_cpus_clk(hw);
	unsigned long rate;
	u32 reg;

	/* Fetch the register value */
	reg = readl(cpus->reg);

	/* apply pre-divider first if parent is pll4 */
	if (SUN9I_CPUS_MUX_GET_PARENT(reg) == SUN9I_CPUS_MUX_PARENT_PLL4)
		parent_rate /= SUN9I_CPUS_PLL4_DIV_GET(reg) + 1;

	/* clk divider */
	rate = parent_rate / (SUN9I_CPUS_DIV_GET(reg) + 1);

	return rate;
}

static long sun9i_a80_cpus_clk_round(unsigned long rate, u8 *divp, u8 *pre_divp,
				     u8 parent, unsigned long parent_rate)
{
	u8 div, pre_div = 1;

	/*
	 * clock can only divide, so we will never be able to achieve
	 * frequencies higher than the parent frequency
	 */
	if (parent_rate && rate > parent_rate)
		rate = parent_rate;

	div = DIV_ROUND_UP(parent_rate, rate);

	/* calculate pre-divider if parent is pll4 */
	if (parent == SUN9I_CPUS_MUX_PARENT_PLL4 && div > 4) {
		/* pre-divider is 1 ~ 32 */
		if (div < 32) {
			pre_div = div;
			div = 1;
		} else if (div < 64) {
			pre_div = DIV_ROUND_UP(div, 2);
			div = 2;
		} else if (div < 96) {
			pre_div = DIV_ROUND_UP(div, 3);
			div = 3;
		} else {
			pre_div = DIV_ROUND_UP(div, 4);
			div = 4;
		}
	}
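	/*
	 * Example with illustrative numbers: for a 960 MHz PLL4 parent and
	 * a 20 MHz request, div starts at 48, which is folded into
	 * pre_div = 24 and div = 2, giving 960 MHz / 24 / 2 = 20 MHz.
	 */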

	/* we were asked to pass back divider values */
	if (divp) {
		*divp = div - 1;
		*pre_divp = pre_div - 1;
	}

	return parent_rate / pre_div / div;
}

static int sun9i_a80_cpus_clk_determine_rate(struct clk_hw *clk,
					     struct clk_rate_request *req)
{
	struct clk_hw *parent, *best_parent = NULL;
	int i, num_parents;
	unsigned long parent_rate, best = 0, child_rate, best_child_rate = 0;
	unsigned long rate = req->rate;

	/* find the parent that can help provide the fastest rate <= rate */
	num_parents = clk_hw_get_num_parents(clk);
	for (i = 0; i < num_parents; i++) {
		parent = clk_hw_get_parent_by_index(clk, i);
		if (!parent)
			continue;
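		/*
		 * If rate changes may propagate to the parent
		 * (CLK_SET_RATE_PARENT), ask what rate the parent could
		 * provide for this request; otherwise treat its current
		 * rate as fixed.
		 */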
		if (clk_hw_get_flags(clk) & CLK_SET_RATE_PARENT)
			parent_rate = clk_hw_round_rate(parent, rate);
		else
			parent_rate = clk_hw_get_rate(parent);

		child_rate = sun9i_a80_cpus_clk_round(rate, NULL, NULL, i,
						      parent_rate);

		if (child_rate <= rate && child_rate > best_child_rate) {
			best_parent = parent;
			best = parent_rate;
			best_child_rate = child_rate;
		}
	}

	if (!best_parent)
		return -EINVAL;

	req->best_parent_hw = best_parent;
	req->best_parent_rate = best;
	req->rate = best_child_rate;

	return 0;
}

static int sun9i_a80_cpus_clk_set_rate(struct clk_hw *hw, unsigned long rate,
				       unsigned long parent_rate)
{
	struct sun9i_a80_cpus_clk *cpus = to_sun9i_a80_cpus_clk(hw);
	unsigned long flags;
	u8 div, pre_div, parent;
	u32 reg;

	spin_lock_irqsave(&sun9i_a80_cpus_lock, flags);

	reg = readl(cpus->reg);

	/* need to know which parent is used to apply pre-divider */
	parent = SUN9I_CPUS_MUX_GET_PARENT(reg);
	sun9i_a80_cpus_clk_round(rate, &div, &pre_div, parent, parent_rate);

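	/*
	 * Only the dividers are programmed here; parent selection is
	 * handled by the clk_mux half of the composite clock registered
	 * in sun9i_a80_cpus_setup().
	 */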
	reg = SUN9I_CPUS_DIV_SET(reg, div);
	reg = SUN9I_CPUS_PLL4_DIV_SET(reg, pre_div);
	writel(reg, cpus->reg);

	spin_unlock_irqrestore(&sun9i_a80_cpus_lock, flags);

	return 0;
}

static const struct clk_ops sun9i_a80_cpus_clk_ops = {
	.determine_rate	= sun9i_a80_cpus_clk_determine_rate,
	.recalc_rate	= sun9i_a80_cpus_clk_recalc_rate,
	.set_rate	= sun9i_a80_cpus_clk_set_rate,
};

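/*
 * Illustrative (not verbatim) example of a device tree node this driver
 * binds to; the address, parent phandles and output name are placeholders.
 * The driver takes up to four parents from "clocks" and treats mux index 3
 * (SUN9I_CPUS_MUX_PARENT_PLL4) as PLL4:
 *
 *	cpus: clk@... {
 *		compatible = "allwinner,sun9i-a80-cpus-clk";
 *		reg = <...>;
 *		#clock-cells = <0>;
 *		clocks = <&parent0>, <&parent1>, <&parent2>, <&pll4>;
 *		clock-output-names = "cpus";
 *	};
 */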
static void sun9i_a80_cpus_setup(struct device_node *node)
{
	const char *clk_name = node->name;
	const char *parents[SUN9I_CPUS_MAX_PARENTS];
	struct resource res;
	struct sun9i_a80_cpus_clk *cpus;
	struct clk_mux *mux;
	struct clk *clk;
	int ret;

	cpus = kzalloc(sizeof(*cpus), GFP_KERNEL);
	if (!cpus)
		return;

	cpus->reg = of_io_request_and_map(node, 0, of_node_full_name(node));
	if (IS_ERR(cpus->reg))
		goto err_free_cpus;

	of_property_read_string(node, "clock-output-names", &clk_name);

	/* we have a mux, we will have >1 parents */
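	/* ret is the number of parents filled in, passed as num_parents below */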
	ret = of_clk_parent_fill(node, parents, SUN9I_CPUS_MAX_PARENTS);

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		goto err_unmap;

	/* set up clock properties */
	mux->reg = cpus->reg;
	mux->shift = SUN9I_CPUS_MUX_SHIFT;
	/* un-shifted mask is what mux_clk expects */
	mux->mask = SUN9I_CPUS_MUX_MASK >> SUN9I_CPUS_MUX_SHIFT;
	mux->lock = &sun9i_a80_cpus_lock;

	clk = clk_register_composite(NULL, clk_name, parents, ret,
				     &mux->hw, &clk_mux_ops,
				     &cpus->hw, &sun9i_a80_cpus_clk_ops,
				     NULL, NULL, 0);
	if (IS_ERR(clk))
		goto err_free_mux;

	ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
	if (ret)
		goto err_unregister;

	return;

err_unregister:
	clk_unregister(clk);
err_free_mux:
	kfree(mux);
err_unmap:
	iounmap(cpus->reg);
	of_address_to_resource(node, 0, &res);
	release_mem_region(res.start, resource_size(&res));
err_free_cpus:
	kfree(cpus);
}
CLK_OF_DECLARE(sun9i_a80_cpus, "allwinner,sun9i-a80-cpus-clk",
	       sun9i_a80_cpus_setup);