xref: /openbmc/linux/drivers/clk/rockchip/clk.c (revision cc8bbe1a)
1 /*
2  * Copyright (c) 2014 MundoReader S.L.
3  * Author: Heiko Stuebner <heiko@sntech.de>
4  *
5  * based on
6  *
7  * samsung/clk.c
8  * Copyright (c) 2013 Samsung Electronics Co., Ltd.
9  * Copyright (c) 2013 Linaro Ltd.
10  * Author: Thomas Abraham <thomas.ab@samsung.com>
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2 of the License, or
15  * (at your option) any later version.
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  * GNU General Public License for more details.
21  */
22 
23 #include <linux/slab.h>
24 #include <linux/clk.h>
25 #include <linux/clk-provider.h>
26 #include <linux/mfd/syscon.h>
27 #include <linux/regmap.h>
28 #include <linux/reboot.h>
29 #include "clk.h"
30 
31 /**
32  * Register a clock branch.
33  * Most clock branches have a form like
34  *
35  * src1 --|--\
36  *        |M |--[GATE]-[DIV]-
37  * src2 --|--/
38  *
39  * sometimes without one of those components.
40  */
41 static struct clk *rockchip_clk_register_branch(const char *name,
42 		const char *const *parent_names, u8 num_parents, void __iomem *base,
43 		int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
44 		u8 div_shift, u8 div_width, u8 div_flags,
45 		struct clk_div_table *div_table, int gate_offset,
46 		u8 gate_shift, u8 gate_flags, unsigned long flags,
47 		spinlock_t *lock)
48 {
49 	struct clk *clk;
50 	struct clk_mux *mux = NULL;
51 	struct clk_gate *gate = NULL;
52 	struct clk_divider *div = NULL;
53 	const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
54 			     *gate_ops = NULL;
55 
56 	if (num_parents > 1) {
57 		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
58 		if (!mux)
59 			return ERR_PTR(-ENOMEM);
60 
61 		mux->reg = base + muxdiv_offset;
62 		mux->shift = mux_shift;
63 		mux->mask = BIT(mux_width) - 1;
64 		mux->flags = mux_flags;
65 		mux->lock = lock;
66 		mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
67 							: &clk_mux_ops;
68 	}
69 
70 	if (gate_offset >= 0) {
71 		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
72 		if (!gate)
73 			return ERR_PTR(-ENOMEM);
74 
75 		gate->flags = gate_flags;
76 		gate->reg = base + gate_offset;
77 		gate->bit_idx = gate_shift;
78 		gate->lock = lock;
79 		gate_ops = &clk_gate_ops;
80 	}
81 
82 	if (div_width > 0) {
83 		div = kzalloc(sizeof(*div), GFP_KERNEL);
84 		if (!div)
85 			return ERR_PTR(-ENOMEM);
86 
87 		div->flags = div_flags;
88 		div->reg = base + muxdiv_offset;
89 		div->shift = div_shift;
90 		div->width = div_width;
91 		div->lock = lock;
92 		div->table = div_table;
93 		div_ops = &clk_divider_ops;
94 	}
95 
96 	clk = clk_register_composite(NULL, name, parent_names, num_parents,
97 				     mux ? &mux->hw : NULL, mux_ops,
98 				     div ? &div->hw : NULL, div_ops,
99 				     gate ? &gate->hw : NULL, gate_ops,
100 				     flags);
101 
102 	return clk;
103 }
104 
/*
 * Bookkeeping for a fractional-divider branch and its optional child mux.
 * The notifier remuxes the child onto the fractional parent around rate
 * changes (see rockchip_clk_frac_notifier_cb below).
 */
struct rockchip_clk_frac {
	struct notifier_block			clk_nb;		/* rate-change notifier */
	struct clk_fractional_divider		div;
	struct clk_gate				gate;

	struct clk_mux				mux;		/* optional child mux */
	const struct clk_ops			*mux_ops;
	int					mux_frac_idx;	/* parent index of the frac clk in mux, -1 if absent */

	bool					rate_change_remuxed;	/* set while temporarily remuxed */
	int					rate_change_idx;	/* parent index to restore after the change */
};

/* map an embedded notifier_block back to its rockchip_clk_frac */
#define to_rockchip_clk_frac_nb(nb) \
			container_of(nb, struct rockchip_clk_frac, clk_nb)
120 
121 static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb,
122 					 unsigned long event, void *data)
123 {
124 	struct clk_notifier_data *ndata = data;
125 	struct rockchip_clk_frac *frac = to_rockchip_clk_frac_nb(nb);
126 	struct clk_mux *frac_mux = &frac->mux;
127 	int ret = 0;
128 
129 	pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n",
130 		 __func__, event, ndata->old_rate, ndata->new_rate);
131 	if (event == PRE_RATE_CHANGE) {
132 		frac->rate_change_idx = frac->mux_ops->get_parent(&frac_mux->hw);
133 		if (frac->rate_change_idx != frac->mux_frac_idx) {
134 			frac->mux_ops->set_parent(&frac_mux->hw, frac->mux_frac_idx);
135 			frac->rate_change_remuxed = 1;
136 		}
137 	} else if (event == POST_RATE_CHANGE) {
138 		/*
139 		 * The POST_RATE_CHANGE notifier runs directly after the
140 		 * divider clock is set in clk_change_rate, so we'll have
141 		 * remuxed back to the original parent before clk_change_rate
142 		 * reaches the mux itself.
143 		 */
144 		if (frac->rate_change_remuxed) {
145 			frac->mux_ops->set_parent(&frac_mux->hw, frac->rate_change_idx);
146 			frac->rate_change_remuxed = 0;
147 		}
148 	}
149 
150 	return notifier_from_errno(ret);
151 }
152 
/*
 * Register a fractional-divider branch (16-bit numerator / 16-bit
 * denominator in one register), optionally with a gate and an optional
 * child mux that selects this fractional clock as one of its parents.
 *
 * Returns the composite clk on success or an ERR_PTR. The gate and
 * divider live inside the allocated rockchip_clk_frac, so the struct
 * must stay alive as long as the registered clock does.
 */
static struct clk *rockchip_clk_register_frac_branch(const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base, int muxdiv_offset, u8 div_flags,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, struct rockchip_clk_branch *child,
		spinlock_t *lock)
{
	struct rockchip_clk_frac *frac;
	struct clk *clk;
	struct clk_gate *gate = NULL;
	struct clk_fractional_divider *div = NULL;
	const struct clk_ops *div_ops = NULL, *gate_ops = NULL;

	/* the divider register offset is mandatory for this branch type */
	if (muxdiv_offset < 0)
		return ERR_PTR(-EINVAL);

	/* the notifier logic below only knows how to handle a mux child */
	if (child && child->branch_type != branch_mux) {
		pr_err("%s: fractional child clock for %s can only be a mux\n",
		       __func__, name);
		return ERR_PTR(-EINVAL);
	}

	frac = kzalloc(sizeof(*frac), GFP_KERNEL);
	if (!frac)
		return ERR_PTR(-ENOMEM);

	/* negative gate_offset means this branch is ungated */
	if (gate_offset >= 0) {
		gate = &frac->gate;
		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	/* numerator in bits [31:16], denominator in bits [15:0] */
	div = &frac->div;
	div->flags = div_flags;
	div->reg = base + muxdiv_offset;
	div->mshift = 16;
	div->mwidth = 16;
	div->mmask = GENMASK(div->mwidth - 1, 0) << div->mshift;
	div->nshift = 0;
	div->nwidth = 16;
	div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
	div->lock = lock;
	div_ops = &clk_fractional_divider_ops;

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     NULL, NULL,
				     &div->hw, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags | CLK_SET_RATE_UNGATE);
	if (IS_ERR(clk)) {
		kfree(frac);
		return clk;
	}

	if (child) {
		struct clk_mux *frac_mux = &frac->mux;
		struct clk_init_data init;
		struct clk *mux_clk;
		int i, ret;

		/* find which mux parent is this fractional clock itself */
		frac->mux_frac_idx = -1;
		for (i = 0; i < child->num_parents; i++) {
			if (!strcmp(name, child->parent_names[i])) {
				pr_debug("%s: found fractional parent in mux at pos %d\n",
					 __func__, i);
				frac->mux_frac_idx = i;
				break;
			}
		}

		frac->mux_ops = &clk_mux_ops;
		frac->clk_nb.notifier_call = rockchip_clk_frac_notifier_cb;

		frac_mux->reg = base + child->muxdiv_offset;
		frac_mux->shift = child->mux_shift;
		frac_mux->mask = BIT(child->mux_width) - 1;
		frac_mux->flags = child->mux_flags;
		frac_mux->lock = lock;
		frac_mux->hw.init = &init;	/* init is copied by clk_register */

		init.name = child->name;
		init.flags = child->flags | CLK_SET_RATE_PARENT;
		init.ops = frac->mux_ops;
		init.parent_names = child->parent_names;
		init.num_parents = child->num_parents;

		/*
		 * NOTE(review): on mux registration failure the fractional
		 * clock itself is still returned; frac is intentionally not
		 * freed here since the registered composite references it.
		 */
		mux_clk = clk_register(NULL, &frac_mux->hw);
		if (IS_ERR(mux_clk))
			return clk;

		rockchip_clk_add_lookup(mux_clk, child->id);

		/* notifier on the fraction divider to catch rate changes */
		if (frac->mux_frac_idx >= 0) {
			ret = clk_notifier_register(clk, &frac->clk_nb);
			if (ret)
				pr_err("%s: failed to register clock notifier for %s\n",
						__func__, name);
		} else {
			pr_warn("%s: could not find %s as parent of %s, rate changes may not work\n",
				__func__, name, child->name);
		}
	}

	return clk;
}
262 
static DEFINE_SPINLOCK(clk_lock);		/* protects shared mux/div/gate registers */
static struct clk **clk_table;			/* id -> clk lookup for the onecell provider */
static void __iomem *reg_base;			/* mapped CRU register base */
static struct clk_onecell_data clk_data;
static struct device_node *cru_node;		/* CRU DT node, used for the GRF phandle */
static struct regmap *grf;			/* lazily looked up in rockchip_clk_get_grf() */
269 
270 void __init rockchip_clk_init(struct device_node *np, void __iomem *base,
271 			      unsigned long nr_clks)
272 {
273 	reg_base = base;
274 	cru_node = np;
275 	grf = ERR_PTR(-EPROBE_DEFER);
276 
277 	clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
278 	if (!clk_table)
279 		pr_err("%s: could not allocate clock lookup table\n", __func__);
280 
281 	clk_data.clks = clk_table;
282 	clk_data.clk_num = nr_clks;
283 	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
284 }
285 
286 struct regmap *rockchip_clk_get_grf(void)
287 {
288 	if (IS_ERR(grf))
289 		grf = syscon_regmap_lookup_by_phandle(cru_node, "rockchip,grf");
290 	return grf;
291 }
292 
293 void rockchip_clk_add_lookup(struct clk *clk, unsigned int id)
294 {
295 	if (clk_table && id)
296 		clk_table[id] = clk;
297 }
298 
299 void __init rockchip_clk_register_plls(struct rockchip_pll_clock *list,
300 				unsigned int nr_pll, int grf_lock_offset)
301 {
302 	struct clk *clk;
303 	int idx;
304 
305 	for (idx = 0; idx < nr_pll; idx++, list++) {
306 		clk = rockchip_clk_register_pll(list->type, list->name,
307 				list->parent_names, list->num_parents,
308 				reg_base, list->con_offset, grf_lock_offset,
309 				list->lock_shift, list->mode_offset,
310 				list->mode_shift, list->rate_table,
311 				list->pll_flags, &clk_lock);
312 		if (IS_ERR(clk)) {
313 			pr_err("%s: failed to register clock %s\n", __func__,
314 				list->name);
315 			continue;
316 		}
317 
318 		rockchip_clk_add_lookup(clk, list->id);
319 	}
320 }
321 
322 void __init rockchip_clk_register_branches(
323 				      struct rockchip_clk_branch *list,
324 				      unsigned int nr_clk)
325 {
326 	struct clk *clk = NULL;
327 	unsigned int idx;
328 	unsigned long flags;
329 
330 	for (idx = 0; idx < nr_clk; idx++, list++) {
331 		flags = list->flags;
332 
333 		/* catch simple muxes */
334 		switch (list->branch_type) {
335 		case branch_mux:
336 			clk = clk_register_mux(NULL, list->name,
337 				list->parent_names, list->num_parents,
338 				flags, reg_base + list->muxdiv_offset,
339 				list->mux_shift, list->mux_width,
340 				list->mux_flags, &clk_lock);
341 			break;
342 		case branch_divider:
343 			if (list->div_table)
344 				clk = clk_register_divider_table(NULL,
345 					list->name, list->parent_names[0],
346 					flags, reg_base + list->muxdiv_offset,
347 					list->div_shift, list->div_width,
348 					list->div_flags, list->div_table,
349 					&clk_lock);
350 			else
351 				clk = clk_register_divider(NULL, list->name,
352 					list->parent_names[0], flags,
353 					reg_base + list->muxdiv_offset,
354 					list->div_shift, list->div_width,
355 					list->div_flags, &clk_lock);
356 			break;
357 		case branch_fraction_divider:
358 			clk = rockchip_clk_register_frac_branch(list->name,
359 				list->parent_names, list->num_parents,
360 				reg_base, list->muxdiv_offset, list->div_flags,
361 				list->gate_offset, list->gate_shift,
362 				list->gate_flags, flags, list->child,
363 				&clk_lock);
364 			break;
365 		case branch_gate:
366 			flags |= CLK_SET_RATE_PARENT;
367 
368 			clk = clk_register_gate(NULL, list->name,
369 				list->parent_names[0], flags,
370 				reg_base + list->gate_offset,
371 				list->gate_shift, list->gate_flags, &clk_lock);
372 			break;
373 		case branch_composite:
374 			clk = rockchip_clk_register_branch(list->name,
375 				list->parent_names, list->num_parents,
376 				reg_base, list->muxdiv_offset, list->mux_shift,
377 				list->mux_width, list->mux_flags,
378 				list->div_shift, list->div_width,
379 				list->div_flags, list->div_table,
380 				list->gate_offset, list->gate_shift,
381 				list->gate_flags, flags, &clk_lock);
382 			break;
383 		case branch_mmc:
384 			clk = rockchip_clk_register_mmc(
385 				list->name,
386 				list->parent_names, list->num_parents,
387 				reg_base + list->muxdiv_offset,
388 				list->div_shift
389 			);
390 			break;
391 		case branch_inverter:
392 			clk = rockchip_clk_register_inverter(
393 				list->name, list->parent_names,
394 				list->num_parents,
395 				reg_base + list->muxdiv_offset,
396 				list->div_shift, list->div_flags, &clk_lock);
397 			break;
398 		}
399 
400 		/* none of the cases above matched */
401 		if (!clk) {
402 			pr_err("%s: unknown clock type %d\n",
403 			       __func__, list->branch_type);
404 			continue;
405 		}
406 
407 		if (IS_ERR(clk)) {
408 			pr_err("%s: failed to register clock %s: %ld\n",
409 			       __func__, list->name, PTR_ERR(clk));
410 			continue;
411 		}
412 
413 		rockchip_clk_add_lookup(clk, list->id);
414 	}
415 }
416 
417 void __init rockchip_clk_register_armclk(unsigned int lookup_id,
418 			const char *name, const char *const *parent_names,
419 			u8 num_parents,
420 			const struct rockchip_cpuclk_reg_data *reg_data,
421 			const struct rockchip_cpuclk_rate_table *rates,
422 			int nrates)
423 {
424 	struct clk *clk;
425 
426 	clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents,
427 					   reg_data, rates, nrates, reg_base,
428 					   &clk_lock);
429 	if (IS_ERR(clk)) {
430 		pr_err("%s: failed to register clock %s: %ld\n",
431 		       __func__, name, PTR_ERR(clk));
432 		return;
433 	}
434 
435 	rockchip_clk_add_lookup(clk, lookup_id);
436 }
437 
438 void __init rockchip_clk_protect_critical(const char *const clocks[],
439 					  int nclocks)
440 {
441 	int i;
442 
443 	/* Protect the clocks that needs to stay on */
444 	for (i = 0; i < nclocks; i++) {
445 		struct clk *clk = __clk_lookup(clocks[i]);
446 
447 		if (clk)
448 			clk_prepare_enable(clk);
449 	}
450 }
451 
static unsigned int reg_restart;	/* CRU offset of the restart register */
static void (*cb_restart)(void);	/* optional SoC-specific pre-restart hook */
static int rockchip_restart_notify(struct notifier_block *this,
				   unsigned long mode, void *cmd)
{
	/* give the SoC code a chance to prepare for the reset */
	if (cb_restart)
		cb_restart();

	/*
	 * NOTE(review): 0xfdb9 is the magic value that triggers the reset
	 * when written to the restart register - presumably the CRU global
	 * soft-reset register; confirm against the SoC TRM.
	 */
	writel(0xfdb9, reg_base + reg_restart);
	return NOTIFY_DONE;
}
463 
static struct notifier_block rockchip_restart_handler = {
	.notifier_call = rockchip_restart_notify,
	.priority = 128,	/* default restart-handler priority */
};
468 
469 void __init rockchip_register_restart_notifier(unsigned int reg, void (*cb)(void))
470 {
471 	int ret;
472 
473 	reg_restart = reg;
474 	cb_restart = cb;
475 	ret = register_restart_handler(&rockchip_restart_handler);
476 	if (ret)
477 		pr_err("%s: cannot register restart handler, %d\n",
478 		       __func__, ret);
479 }
480