xref: /openbmc/linux/drivers/clk/ti/dpll.c (revision a59511d1)
1 /*
2  * OMAP DPLL clock support
3  *
4  * Copyright (C) 2013 Texas Instruments, Inc.
5  *
6  * Tero Kristo <t-kristo@ti.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  *
12  * This program is distributed "as is" WITHOUT ANY WARRANTY of any
13  * kind, whether express or implied; without even the implied warranty
14  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  */
17 
18 #include <linux/clk.h>
19 #include <linux/clk-provider.h>
20 #include <linux/slab.h>
21 #include <linux/err.h>
22 #include <linux/of.h>
23 #include <linux/of_address.h>
24 #include <linux/clk/ti.h>
25 #include "clock.h"
26 
27 #undef pr_fmt
28 #define pr_fmt(fmt) "%s: " fmt, __func__
29 
#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX)
/*
 * clk_ops for OMAP4-family DPLLs with the REGM4XEN multiplier
 * extension; rate calculation goes through the regm4xen-aware
 * recalc/round/determine helpers.
 */
static const struct clk_ops dpll_m4xen_ck_ops = {
	.enable		= &omap3_noncore_dpll_enable,
	.disable	= &omap3_noncore_dpll_disable,
	.recalc_rate	= &omap4_dpll_regm4xen_recalc,
	.round_rate	= &omap4_dpll_regm4xen_round_rate,
	.set_rate	= &omap3_noncore_dpll_set_rate,
	.set_parent	= &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent	= &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate	= &omap4_dpll_regm4xen_determine_rate,
	.get_parent	= &omap2_init_dpll_parent,
};
#else
/* Empty stub keeps common code linking when no m4xen SoC is selected */
static const struct clk_ops dpll_m4xen_ck_ops = {};
#endif
46 
#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) || \
	defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX) || \
	defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
/* Core DPLL: rate is read-only from here, only recalc/parent queries */
static const struct clk_ops dpll_core_ck_ops = {
	.recalc_rate	= &omap3_dpll_recalc,
	.get_parent	= &omap2_init_dpll_parent,
};

/* Fully controllable noncore DPLL: gateable and rate-programmable */
static const struct clk_ops dpll_ck_ops = {
	.enable		= &omap3_noncore_dpll_enable,
	.disable	= &omap3_noncore_dpll_disable,
	.recalc_rate	= &omap3_dpll_recalc,
	.round_rate	= &omap2_dpll_round_rate,
	.set_rate	= &omap3_noncore_dpll_set_rate,
	.set_parent	= &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent	= &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate	= &omap3_noncore_dpll_determine_rate,
	.get_parent	= &omap2_init_dpll_parent,
};

/* Same rate controls as dpll_ck_ops but without enable/disable hooks */
static const struct clk_ops dpll_no_gate_ck_ops = {
	.recalc_rate	= &omap3_dpll_recalc,
	.get_parent	= &omap2_init_dpll_parent,
	.round_rate	= &omap2_dpll_round_rate,
	.set_rate	= &omap3_noncore_dpll_set_rate,
	.set_parent	= &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent	= &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate	= &omap3_noncore_dpll_determine_rate,
};
#else
/* Empty stubs keep the shared setup code linking on other SoCs */
static const struct clk_ops dpll_core_ck_ops = {};
static const struct clk_ops dpll_ck_ops = {};
static const struct clk_ops dpll_no_gate_ck_ops = {};
const struct clk_hw_omap_ops clkhwops_omap3_dpll = {};
#endif
82 
#ifdef CONFIG_ARCH_OMAP2
/* OMAP2 core DPLL: reprogrammable via omap2_reprogram_dpllcore */
static const struct clk_ops omap2_dpll_core_ck_ops = {
	.get_parent	= &omap2_init_dpll_parent,
	.recalc_rate	= &omap2_dpllcore_recalc,
	.round_rate	= &omap2_dpll_round_rate,
	.set_rate	= &omap2_reprogram_dpllcore,
};
#else
/* Empty stub keeps the OMAP2 setup wrapper linking on other SoCs */
static const struct clk_ops omap2_dpll_core_ck_ops = {};
#endif
93 
#ifdef CONFIG_ARCH_OMAP3
/* OMAP3 core DPLL: rate can be queried/rounded but not set from here */
static const struct clk_ops omap3_dpll_core_ck_ops = {
	.get_parent	= &omap2_init_dpll_parent,
	.recalc_rate	= &omap3_dpll_recalc,
	.round_rate	= &omap2_dpll_round_rate,
};
#else
/* Empty stub keeps the OMAP3 setup wrappers linking on other SoCs */
static const struct clk_ops omap3_dpll_core_ck_ops = {};
#endif
103 
#ifdef CONFIG_ARCH_OMAP3
/* OMAP3 noncore DPLL: full gate + rate control */
static const struct clk_ops omap3_dpll_ck_ops = {
	.enable		= &omap3_noncore_dpll_enable,
	.disable	= &omap3_noncore_dpll_disable,
	.get_parent	= &omap2_init_dpll_parent,
	.recalc_rate	= &omap3_dpll_recalc,
	.set_rate	= &omap3_noncore_dpll_set_rate,
	.set_parent	= &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent	= &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate	= &omap3_noncore_dpll_determine_rate,
	.round_rate	= &omap2_dpll_round_rate,
};

/*
 * OMAP3 PER (DPLL4) variant: identical to omap3_dpll_ck_ops except that
 * rate changes go through the dpll4-specific set_rate helpers.
 */
static const struct clk_ops omap3_dpll_per_ck_ops = {
	.enable		= &omap3_noncore_dpll_enable,
	.disable	= &omap3_noncore_dpll_disable,
	.get_parent	= &omap2_init_dpll_parent,
	.recalc_rate	= &omap3_dpll_recalc,
	.set_rate	= &omap3_dpll4_set_rate,
	.set_parent	= &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent	= &omap3_dpll4_set_rate_and_parent,
	.determine_rate	= &omap3_noncore_dpll_determine_rate,
	.round_rate	= &omap2_dpll_round_rate,
};
#endif
129 
/* DPLL x2 output: rate is purely derived, so only recalc is needed */
static const struct clk_ops dpll_x2_ck_ops = {
	.recalc_rate	= &omap3_clkoutx2_recalc,
};
133 
/**
 * _register_dpll - low level registration of a DPLL clock
 * @hw: hardware clock definition for the clock
 * @node: device node for the clock
 *
 * Finalizes DPLL registration process. In case a failure (clk-ref or
 * clk-bypass is missing), the clock is added to retry list and
 * the initialization is retried on later stage.
 */
static void __init _register_dpll(struct clk_hw *hw,
				  struct device_node *node)
{
	struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
	struct dpll_data *dd = clk_hw->dpll_data;
	struct clk *clk;

	/* clock index 0 of the node is the DPLL reference (clk-ref) */
	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		pr_debug("clk-ref missing for %s, retry later\n",
			 node->name);
		/*
		 * ti_clk_retry_init() returning 0 means the node was
		 * queued for a later retry; only fall through to
		 * cleanup when even queuing failed.
		 */
		if (!ti_clk_retry_init(node, hw, _register_dpll))
			return;

		goto cleanup;
	}

	dd->clk_ref = __clk_get_hw(clk);

	/* clock index 1 of the node is the bypass clock (clk-bypass) */
	clk = of_clk_get(node, 1);

	if (IS_ERR(clk)) {
		pr_debug("clk-bypass missing for %s, retry later\n",
			 node->name);
		if (!ti_clk_retry_init(node, hw, _register_dpll))
			return;

		goto cleanup;
	}

	dd->clk_bypass = __clk_get_hw(clk);

	/* register the clock */
	clk = clk_register(NULL, &clk_hw->hw);

	if (!IS_ERR(clk)) {
		omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
		/* init data was only needed by clk_register() */
		kfree(clk_hw->hw.init->parent_names);
		kfree(clk_hw->hw.init);
		return;
	}

cleanup:
	/*
	 * NOTE(review): struct clk references obtained via of_clk_get()
	 * above are not put on this path - presumably held for the
	 * lifetime of the system; confirm against other TI clk drivers.
	 */
	kfree(clk_hw->dpll_data);
	kfree(clk_hw->hw.init->parent_names);
	kfree(clk_hw->hw.init);
	kfree(clk_hw);
}
192 
#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
/*
 * _get_reg - pack a module index + register offset into a reg cookie
 * @module: clock memmap module index
 * @offset: register offset within that module
 *
 * Lays a struct clk_omap_reg over a u32 to encode the pair, then
 * returns the value cast to void __iomem *. The result is an opaque
 * cookie decoded by consumers using MEMMAP_ADDRESSING (see the
 * clk_hw->flags assignment in ti_clk_register_dpll()); it is not a
 * dereferenceable pointer.
 */
static void __iomem *_get_reg(u8 module, u16 offset)
{
	u32 reg;
	struct clk_omap_reg *reg_setup;

	reg_setup = (struct clk_omap_reg *)&reg;

	reg_setup->index = module;
	reg_setup->offset = offset;

	return (void __iomem *)reg;
}
206 
207 struct clk *ti_clk_register_dpll(struct ti_clk *setup)
208 {
209 	struct clk_hw_omap *clk_hw;
210 	struct clk_init_data init = { NULL };
211 	struct dpll_data *dd;
212 	struct clk *clk;
213 	struct ti_clk_dpll *dpll;
214 	const struct clk_ops *ops = &omap3_dpll_ck_ops;
215 	struct clk *clk_ref;
216 	struct clk *clk_bypass;
217 
218 	dpll = setup->data;
219 
220 	if (dpll->num_parents < 2)
221 		return ERR_PTR(-EINVAL);
222 
223 	clk_ref = clk_get_sys(NULL, dpll->parents[0]);
224 	clk_bypass = clk_get_sys(NULL, dpll->parents[1]);
225 
226 	if (IS_ERR_OR_NULL(clk_ref) || IS_ERR_OR_NULL(clk_bypass))
227 		return ERR_PTR(-EAGAIN);
228 
229 	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
230 	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
231 	if (!dd || !clk_hw) {
232 		clk = ERR_PTR(-ENOMEM);
233 		goto cleanup;
234 	}
235 
236 	clk_hw->dpll_data = dd;
237 	clk_hw->ops = &clkhwops_omap3_dpll;
238 	clk_hw->hw.init = &init;
239 	clk_hw->flags = MEMMAP_ADDRESSING;
240 
241 	init.name = setup->name;
242 	init.ops = ops;
243 
244 	init.num_parents = dpll->num_parents;
245 	init.parent_names = dpll->parents;
246 
247 	dd->control_reg = _get_reg(dpll->module, dpll->control_reg);
248 	dd->idlest_reg = _get_reg(dpll->module, dpll->idlest_reg);
249 	dd->mult_div1_reg = _get_reg(dpll->module, dpll->mult_div1_reg);
250 	dd->autoidle_reg = _get_reg(dpll->module, dpll->autoidle_reg);
251 
252 	dd->modes = dpll->modes;
253 	dd->div1_mask = dpll->div1_mask;
254 	dd->idlest_mask = dpll->idlest_mask;
255 	dd->mult_mask = dpll->mult_mask;
256 	dd->autoidle_mask = dpll->autoidle_mask;
257 	dd->enable_mask = dpll->enable_mask;
258 	dd->sddiv_mask = dpll->sddiv_mask;
259 	dd->dco_mask = dpll->dco_mask;
260 	dd->max_divider = dpll->max_divider;
261 	dd->min_divider = dpll->min_divider;
262 	dd->max_multiplier = dpll->max_multiplier;
263 	dd->auto_recal_bit = dpll->auto_recal_bit;
264 	dd->recal_en_bit = dpll->recal_en_bit;
265 	dd->recal_st_bit = dpll->recal_st_bit;
266 
267 	dd->clk_ref = __clk_get_hw(clk_ref);
268 	dd->clk_bypass = __clk_get_hw(clk_bypass);
269 
270 	if (dpll->flags & CLKF_CORE)
271 		ops = &omap3_dpll_core_ck_ops;
272 
273 	if (dpll->flags & CLKF_PER)
274 		ops = &omap3_dpll_per_ck_ops;
275 
276 	if (dpll->flags & CLKF_J_TYPE)
277 		dd->flags |= DPLL_J_TYPE;
278 
279 	clk = clk_register(NULL, &clk_hw->hw);
280 
281 	if (!IS_ERR(clk))
282 		return clk;
283 
284 cleanup:
285 	kfree(dd);
286 	kfree(clk_hw);
287 	return clk;
288 }
289 #endif
290 
#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \
	defined(CONFIG_SOC_AM43XX)
/**
 * _register_dpll_x2 - Registers a DPLLx2 clock
 * @node: device node for this clock
 * @ops: clk_ops for this clock
 * @hw_ops: clk_hw_ops for this clock
 *
 * Initializes a DPLL x 2 clock from device tree data.
 */
static void _register_dpll_x2(struct device_node *node,
			      const struct clk_ops *ops,
			      const struct clk_hw_omap_ops *hw_ops)
{
	struct clk_init_data init = { NULL };
	struct clk_hw_omap *hw_omap;
	const char *parent;
	struct clk *res;

	/* an x2 output derives from exactly one parent clock */
	parent = of_clk_get_parent_name(node, 0);
	if (!parent) {
		pr_err("%s must have parent\n", node->name);
		return;
	}

	hw_omap = kzalloc(sizeof(*hw_omap), GFP_KERNEL);
	if (!hw_omap)
		return;

	init.name = node->name;
	init.ops = ops;
	init.num_parents = 1;
	init.parent_names = &parent;

	hw_omap->ops = hw_ops;
	hw_omap->hw.init = &init;

	/* register the clock; on failure only the hw wrapper is ours */
	res = clk_register(NULL, &hw_omap->hw);
	if (IS_ERR(res)) {
		kfree(hw_omap);
		return;
	}

	omap2_init_clk_hw_omap_clocks(&hw_omap->hw);
	of_clk_add_provider(node, of_clk_src_simple_get, res);
}
#endif
341 
342 /**
343  * of_ti_dpll_setup - Setup function for OMAP DPLL clocks
344  * @node: device node containing the DPLL info
345  * @ops: ops for the DPLL
346  * @ddt: DPLL data template to use
347  *
348  * Initializes a DPLL clock from device tree data.
349  */
350 static void __init of_ti_dpll_setup(struct device_node *node,
351 				    const struct clk_ops *ops,
352 				    const struct dpll_data *ddt)
353 {
354 	struct clk_hw_omap *clk_hw = NULL;
355 	struct clk_init_data *init = NULL;
356 	const char **parent_names = NULL;
357 	struct dpll_data *dd = NULL;
358 	u8 dpll_mode = 0;
359 
360 	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
361 	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
362 	init = kzalloc(sizeof(*init), GFP_KERNEL);
363 	if (!dd || !clk_hw || !init)
364 		goto cleanup;
365 
366 	memcpy(dd, ddt, sizeof(*dd));
367 
368 	clk_hw->dpll_data = dd;
369 	clk_hw->ops = &clkhwops_omap3_dpll;
370 	clk_hw->hw.init = init;
371 	clk_hw->flags = MEMMAP_ADDRESSING;
372 
373 	init->name = node->name;
374 	init->ops = ops;
375 
376 	init->num_parents = of_clk_get_parent_count(node);
377 	if (!init->num_parents) {
378 		pr_err("%s must have parent(s)\n", node->name);
379 		goto cleanup;
380 	}
381 
382 	parent_names = kzalloc(sizeof(char *) * init->num_parents, GFP_KERNEL);
383 	if (!parent_names)
384 		goto cleanup;
385 
386 	of_clk_parent_fill(node, parent_names, init->num_parents);
387 
388 	init->parent_names = parent_names;
389 
390 	dd->control_reg = ti_clk_get_reg_addr(node, 0);
391 
392 	/*
393 	 * Special case for OMAP2 DPLL, register order is different due to
394 	 * missing idlest_reg, also clkhwops is different. Detected from
395 	 * missing idlest_mask.
396 	 */
397 	if (!dd->idlest_mask) {
398 		dd->mult_div1_reg = ti_clk_get_reg_addr(node, 1);
399 #ifdef CONFIG_ARCH_OMAP2
400 		clk_hw->ops = &clkhwops_omap2xxx_dpll;
401 		omap2xxx_clkt_dpllcore_init(&clk_hw->hw);
402 #endif
403 	} else {
404 		dd->idlest_reg = ti_clk_get_reg_addr(node, 1);
405 		if (IS_ERR(dd->idlest_reg))
406 			goto cleanup;
407 
408 		dd->mult_div1_reg = ti_clk_get_reg_addr(node, 2);
409 	}
410 
411 	if (IS_ERR(dd->control_reg) || IS_ERR(dd->mult_div1_reg))
412 		goto cleanup;
413 
414 	if (dd->autoidle_mask) {
415 		dd->autoidle_reg = ti_clk_get_reg_addr(node, 3);
416 		if (IS_ERR(dd->autoidle_reg))
417 			goto cleanup;
418 	}
419 
420 	if (of_property_read_bool(node, "ti,low-power-stop"))
421 		dpll_mode |= 1 << DPLL_LOW_POWER_STOP;
422 
423 	if (of_property_read_bool(node, "ti,low-power-bypass"))
424 		dpll_mode |= 1 << DPLL_LOW_POWER_BYPASS;
425 
426 	if (of_property_read_bool(node, "ti,lock"))
427 		dpll_mode |= 1 << DPLL_LOCKED;
428 
429 	if (dpll_mode)
430 		dd->modes = dpll_mode;
431 
432 	_register_dpll(&clk_hw->hw, node);
433 	return;
434 
435 cleanup:
436 	kfree(dd);
437 	kfree(parent_names);
438 	kfree(init);
439 	kfree(clk_hw);
440 }
441 
#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX)
/* OMAP4-family x2 output: uses the dpllmx hw ops */
static void __init of_ti_omap4_dpll_x2_setup(struct device_node *node)
{
	_register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx);
}
CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
	       of_ti_omap4_dpll_x2_setup);
#endif
451 
#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
/* AM33xx/AM43xx x2 output: no SoC-specific hw ops needed */
static void __init of_ti_am3_dpll_x2_setup(struct device_node *node)
{
	_register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
}
CLK_OF_DECLARE(ti_am3_dpll_x2_clock, "ti,am3-dpll-x2-clock",
	       of_ti_am3_dpll_x2_setup);
#endif
460 
#ifdef CONFIG_ARCH_OMAP3
/* Template for a standard OMAP3 noncore DPLL */
static void __init of_ti_omap3_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.freqsel_mask = 0xf0,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_dpll_clock, "ti,omap3-dpll-clock",
	       of_ti_omap3_dpll_setup);

/* OMAP3 core DPLL: mult/div fields at different bit offsets, no modes */
static void __init of_ti_omap3_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 16,
		.div1_mask = 0x7f << 8,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.freqsel_mask = 0xf0,
	};

	of_ti_dpll_setup(node, &omap3_dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_core_dpll_clock, "ti,omap3-dpll-core-clock",
	       of_ti_omap3_core_dpll_setup);

/* OMAP3 PER DPLL (DPLL4): shifted control fields, dpll4-specific ops */
static void __init of_ti_omap3_per_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1 << 1,
		.enable_mask = 0x7 << 16,
		.autoidle_mask = 0x7 << 3,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.freqsel_mask = 0xf00000,
		.modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_per_dpll_clock, "ti,omap3-dpll-per-clock",
	       of_ti_omap3_per_dpll_setup);

/* OMAP3 PER j-type DPLL: wider multiplier plus sigma-delta/DCO fields */
static void __init of_ti_omap3_per_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1 << 1,
		.enable_mask = 0x7 << 16,
		.autoidle_mask = 0x7 << 3,
		.mult_mask = 0xfff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 4095,
		.max_divider = 128,
		.min_divider = 1,
		.sddiv_mask = 0xff << 24,
		.dco_mask = 0xe << 20,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_per_jtype_dpll_clock, "ti,omap3-dpll-per-j-type-clock",
	       of_ti_omap3_per_jtype_dpll_setup);
#endif
543 
544 static void __init of_ti_omap4_dpll_setup(struct device_node *node)
545 {
546 	const struct dpll_data dd = {
547 		.idlest_mask = 0x1,
548 		.enable_mask = 0x7,
549 		.autoidle_mask = 0x7,
550 		.mult_mask = 0x7ff << 8,
551 		.div1_mask = 0x7f,
552 		.max_multiplier = 2047,
553 		.max_divider = 128,
554 		.min_divider = 1,
555 		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
556 	};
557 
558 	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
559 }
560 CLK_OF_DECLARE(ti_omap4_dpll_clock, "ti,omap4-dpll-clock",
561 	       of_ti_omap4_dpll_setup);
562 
/* OMAP5 MPU DPLL: standard template plus duty-cycle correction (DCC) */
static void __init of_ti_omap5_mpu_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.dcc_mask = BIT(22),
		.dcc_rate = 1400000000, /* DCC beyond 1.4GHz */
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(of_ti_omap5_mpu_dpll_clock, "ti,omap5-mpu-dpll-clock",
	       of_ti_omap5_mpu_dpll_setup);

/* OMAP4 core DPLL: same template as noncore but read-only core ops */
static void __init of_ti_omap4_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_core_dpll_clock, "ti,omap4-dpll-core-clock",
	       of_ti_omap4_core_dpll_setup);
602 
#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX)
/* OMAP4-family DPLL with REGM4XEN multiplier and low-power mode bits */
static void __init of_ti_omap4_m4xen_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.m4xen_mask = 0x800,
		.lpmode_mask = 1 << 10,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_m4xen_dpll_clock, "ti,omap4-dpll-m4xen-clock",
	       of_ti_omap4_m4xen_dpll_setup);

/* OMAP4-family j-type DPLL: wider mult/div fields and sigma-delta div */
static void __init of_ti_omap4_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0xfff << 8,
		.div1_mask = 0xff,
		.max_multiplier = 4095,
		.max_divider = 256,
		.min_divider = 1,
		.sddiv_mask = 0xff << 24,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_jtype_dpll_clock, "ti,omap4-dpll-j-type-clock",
	       of_ti_omap4_jtype_dpll_setup);
#endif
647 
/* AM3 DPLL without gate control (no autoidle mask either) */
static void __init of_ti_am3_no_gate_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_no_gate_dpll_clock, "ti,am3-dpll-no-gate-clock",
	       of_ti_am3_no_gate_dpll_setup);

/* AM3 j-type DPLL: wider multiplier/divider range, min divider of 2 */
static void __init of_ti_am3_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 4095,
		.max_divider = 256,
		.min_divider = 2,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_jtype_dpll_clock, "ti,am3-dpll-j-type-clock",
	       of_ti_am3_jtype_dpll_setup);

/* AM3 j-type DPLL that also has no gate control */
static void __init of_ti_am3_no_gate_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_no_gate_jtype_dpll_clock,
	       "ti,am3-dpll-no-gate-j-type-clock",
	       of_ti_am3_no_gate_jtype_dpll_setup);

/* Standard AM3 noncore DPLL */
static void __init of_ti_am3_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_dpll_clock, "ti,am3-dpll-clock", of_ti_am3_dpll_setup);

/* AM3 core DPLL: same template, read-only core ops */
static void __init of_ti_am3_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_core_dpll_clock, "ti,am3-dpll-core-clock",
	       of_ti_am3_core_dpll_setup);

/*
 * OMAP2 core DPLL: no idlest_mask, which triggers the OMAP2 register
 * layout special case in of_ti_dpll_setup().
 */
static void __init of_ti_omap2_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.enable_mask = 0x3,
		.mult_mask = 0x3ff << 12,
		.div1_mask = 0xf << 8,
		.max_divider = 16,
		.min_divider = 1,
	};

	of_ti_dpll_setup(node, &omap2_dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap2_core_dpll_clock, "ti,omap2-dpll-core-clock",
	       of_ti_omap2_core_dpll_setup);
754