xref: /openbmc/linux/drivers/clk/ti/dpll.c (revision 3031993b)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP DPLL clock support
 *
 * Copyright (C) 2013 Texas Instruments, Inc.
 *
 * Tero Kristo <t-kristo@ti.com>
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/ti.h>
#include "clock.h"

#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX)
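/*
 * Ops for OMAP4-family noncore DPLLs that implement the REGM4XEN bit,
 * which can apply an additional x4 multiplier to the DPLL output; the
 * recalc/round/determine callbacks below take that bit into account.
 */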
static const struct clk_ops dpll_m4xen_ck_ops = {
	.enable		= &omap3_noncore_dpll_enable,
	.disable	= &omap3_noncore_dpll_disable,
	.recalc_rate	= &omap4_dpll_regm4xen_recalc,
	.round_rate	= &omap4_dpll_regm4xen_round_rate,
	.set_rate	= &omap3_noncore_dpll_set_rate,
	.set_parent	= &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent	= &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate	= &omap4_dpll_regm4xen_determine_rate,
	.get_parent	= &omap2_init_dpll_parent,
	.save_context	= &omap3_core_dpll_save_context,
	.restore_context = &omap3_core_dpll_restore_context,
};
#else
static const struct clk_ops dpll_m4xen_ck_ops = {};
#endif

#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) || \
	defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX) || \
	defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
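/*
 * The core DPLL is never gated or reprogrammed through the clock
 * framework, so only rate readback and parent lookup are provided.
 */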
static const struct clk_ops dpll_core_ck_ops = {
	.recalc_rate	= &omap3_dpll_recalc,
	.get_parent	= &omap2_init_dpll_parent,
};

static const struct clk_ops dpll_ck_ops = {
	.enable		= &omap3_noncore_dpll_enable,
	.disable	= &omap3_noncore_dpll_disable,
	.recalc_rate	= &omap3_dpll_recalc,
	.round_rate	= &omap2_dpll_round_rate,
	.set_rate	= &omap3_noncore_dpll_set_rate,
	.set_parent	= &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent	= &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate	= &omap3_noncore_dpll_determine_rate,
	.get_parent	= &omap2_init_dpll_parent,
	.save_context	= &omap3_noncore_dpll_save_context,
	.restore_context = &omap3_noncore_dpll_restore_context,
};

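/*
 * Same as dpll_ck_ops, but without the enable/disable hooks, for DPLLs
 * whose output must not be gated by the clock framework.
 */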
static const struct clk_ops dpll_no_gate_ck_ops = {
	.recalc_rate	= &omap3_dpll_recalc,
	.get_parent	= &omap2_init_dpll_parent,
	.round_rate	= &omap2_dpll_round_rate,
	.set_rate	= &omap3_noncore_dpll_set_rate,
	.set_parent	= &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent	= &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate	= &omap3_noncore_dpll_determine_rate,
	.save_context	= &omap3_noncore_dpll_save_context,
	.restore_context = &omap3_noncore_dpll_restore_context,
};
#else
static const struct clk_ops dpll_core_ck_ops = {};
static const struct clk_ops dpll_ck_ops = {};
static const struct clk_ops dpll_no_gate_ck_ops = {};
const struct clk_hw_omap_ops clkhwops_omap3_dpll = {};
#endif

#ifdef CONFIG_ARCH_OMAP2
static const struct clk_ops omap2_dpll_core_ck_ops = {
	.get_parent	= &omap2_init_dpll_parent,
	.recalc_rate	= &omap2_dpllcore_recalc,
	.round_rate	= &omap2_dpll_round_rate,
	.set_rate	= &omap2_reprogram_dpllcore,
};
#else
static const struct clk_ops omap2_dpll_core_ck_ops = {};
#endif

#ifdef CONFIG_ARCH_OMAP3
static const struct clk_ops omap3_dpll_core_ck_ops = {
	.get_parent	= &omap2_init_dpll_parent,
	.recalc_rate	= &omap3_dpll_recalc,
	.round_rate	= &omap2_dpll_round_rate,
};
#else
static const struct clk_ops omap3_dpll_core_ck_ops = {};
#endif

#ifdef CONFIG_ARCH_OMAP3
static const struct clk_ops omap3_dpll_ck_ops = {
	.enable		= &omap3_noncore_dpll_enable,
	.disable	= &omap3_noncore_dpll_disable,
	.get_parent	= &omap2_init_dpll_parent,
	.recalc_rate	= &omap3_dpll_recalc,
	.set_rate	= &omap3_noncore_dpll_set_rate,
	.set_parent	= &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent	= &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate	= &omap3_noncore_dpll_determine_rate,
	.round_rate	= &omap2_dpll_round_rate,
};

static const struct clk_ops omap3_dpll5_ck_ops = {
	.enable		= &omap3_noncore_dpll_enable,
	.disable	= &omap3_noncore_dpll_disable,
	.get_parent	= &omap2_init_dpll_parent,
	.recalc_rate	= &omap3_dpll_recalc,
	.set_rate	= &omap3_dpll5_set_rate,
	.set_parent	= &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent	= &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate	= &omap3_noncore_dpll_determine_rate,
	.round_rate	= &omap2_dpll_round_rate,
};

static const struct clk_ops omap3_dpll_per_ck_ops = {
	.enable		= &omap3_noncore_dpll_enable,
	.disable	= &omap3_noncore_dpll_disable,
	.get_parent	= &omap2_init_dpll_parent,
	.recalc_rate	= &omap3_dpll_recalc,
	.set_rate	= &omap3_dpll4_set_rate,
	.set_parent	= &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent	= &omap3_dpll4_set_rate_and_parent,
	.determine_rate	= &omap3_noncore_dpll_determine_rate,
	.round_rate	= &omap2_dpll_round_rate,
};
#endif

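/*
 * The x2 ("CLKOUTX2") output only needs rate readback: it runs at twice
 * the parent DPLL rate while the parent DPLL is locked.
 */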
static const struct clk_ops dpll_x2_ck_ops = {
	.recalc_rate	= &omap3_clkoutx2_recalc,
};

/**
 * _register_dpll - low level registration of a DPLL clock
 * @user: pointer to the hardware clock definition for the clock
 * @node: device node for the clock
 *
 * Finalizes the DPLL registration process. In case of a failure (clk-ref or
 * clk-bypass is missing), the clock is added to the retry list and the
 * initialization is retried at a later stage.
 */
static void __init _register_dpll(void *user,
				  struct device_node *node)
{
	struct clk_hw *hw = user;
	struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
	struct dpll_data *dd = clk_hw->dpll_data;
	const char *name;
	struct clk *clk;
	const struct clk_init_data *init = hw->init;

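	/* Clock index 0 in the DT node is the DPLL reference clock (clk-ref). */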
	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		pr_debug("clk-ref missing for %pOFn, retry later\n",
			 node);
		if (!ti_clk_retry_init(node, hw, _register_dpll))
			return;

		goto cleanup;
	}

	dd->clk_ref = __clk_get_hw(clk);

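	/* Clock index 1 in the DT node is the DPLL bypass clock (clk-bypass). */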
	clk = of_clk_get(node, 1);

	if (IS_ERR(clk)) {
		pr_debug("clk-bypass missing for %pOFn, retry later\n",
			 node);
		if (!ti_clk_retry_init(node, hw, _register_dpll))
			return;

		goto cleanup;
	}

	dd->clk_bypass = __clk_get_hw(clk);

	/* register the clock */
	name = ti_dt_clk_name(node);
	clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);

	if (!IS_ERR(clk)) {
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
		kfree(init->parent_names);
		kfree(init);
		return;
	}

cleanup:
	kfree(clk_hw->dpll_data);
	kfree(init->parent_names);
	kfree(init);
	kfree(clk_hw);
}

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \
	defined(CONFIG_SOC_AM43XX)
/**
 * _register_dpll_x2 - Registers a DPLL x2 clock
 * @node: device node for this clock
 * @ops: clk_ops for this clock
 * @hw_ops: clk_hw_ops for this clock
 *
 * Initializes a DPLL x2 clock from device tree data.
 */
static void _register_dpll_x2(struct device_node *node,
			      const struct clk_ops *ops,
			      const struct clk_hw_omap_ops *hw_ops)
{
	struct clk *clk;
	struct clk_init_data init = { NULL };
	struct clk_hw_omap *clk_hw;
	const char *name = ti_dt_clk_name(node);
	const char *parent_name;

	parent_name = of_clk_get_parent_name(node, 0);
	if (!parent_name) {
		pr_err("%pOFn must have parent\n", node);
		return;
	}

	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	if (!clk_hw)
		return;

	clk_hw->ops = hw_ops;
	clk_hw->hw.init = &init;

	init.name = name;
	init.ops = ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX)
	if (hw_ops == &clkhwops_omap4_dpllmx) {
		int ret;

		/* Check if the register is defined; if not, drop the hw-ops */
		ret = of_property_count_elems_of_size(node, "reg", 1);
		if (ret <= 0) {
			clk_hw->ops = NULL;
		} else if (ti_clk_get_reg_addr(node, 0, &clk_hw->clksel_reg)) {
			kfree(clk_hw);
			return;
		}
	}
#endif

	/* register the clock */
	clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);

	if (IS_ERR(clk))
		kfree(clk_hw);
	else
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
}
#endif

/**
 * of_ti_dpll_setup - Setup function for OMAP DPLL clocks
 * @node: device node containing the DPLL info
 * @ops: ops for the DPLL
 * @ddt: DPLL data template to use
 *
 * Initializes a DPLL clock from device tree data.
 */
static void __init of_ti_dpll_setup(struct device_node *node,
				    const struct clk_ops *ops,
				    const struct dpll_data *ddt)
{
	struct clk_hw_omap *clk_hw = NULL;
	struct clk_init_data *init = NULL;
	const char **parent_names = NULL;
	struct dpll_data *dd = NULL;
	int ssc_clk_index;
	u8 dpll_mode = 0;
	u32 min_div;

	dd = kmemdup(ddt, sizeof(*dd), GFP_KERNEL);
	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	init = kzalloc(sizeof(*init), GFP_KERNEL);
	if (!dd || !clk_hw || !init)
		goto cleanup;

	clk_hw->dpll_data = dd;
	clk_hw->ops = &clkhwops_omap3_dpll;
	clk_hw->hw.init = init;

	init->name = ti_dt_clk_name(node);
	init->ops = ops;

	init->num_parents = of_clk_get_parent_count(node);
	if (!init->num_parents) {
		pr_err("%pOFn must have parent(s)\n", node);
		goto cleanup;
	}

	parent_names = kcalloc(init->num_parents, sizeof(char *), GFP_KERNEL);
	if (!parent_names)
		goto cleanup;

	of_clk_parent_fill(node, parent_names, init->num_parents);

	init->parent_names = parent_names;

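	/* reg index 0 is always the DPLL control register. */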
	if (ti_clk_get_reg_addr(node, 0, &dd->control_reg))
		goto cleanup;

	/*
	 * Special case for the OMAP2 DPLL: the register order is different
	 * because there is no idlest_reg, and the clkhwops differ as well.
	 * Detected from the missing idlest_mask.
	 */
	if (!dd->idlest_mask) {
		if (ti_clk_get_reg_addr(node, 1, &dd->mult_div1_reg))
			goto cleanup;
#ifdef CONFIG_ARCH_OMAP2
		clk_hw->ops = &clkhwops_omap2xxx_dpll;
		omap2xxx_clkt_dpllcore_init(&clk_hw->hw);
#endif
	} else {
		if (ti_clk_get_reg_addr(node, 1, &dd->idlest_reg))
			goto cleanup;

		if (ti_clk_get_reg_addr(node, 2, &dd->mult_div1_reg))
			goto cleanup;
	}

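	/*
	 * An autoidle register, indicated by a non-zero autoidle_mask in the
	 * template, sits at reg index 3; the optional spread spectrum (SSC)
	 * registers follow it.
	 */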
	if (dd->autoidle_mask) {
		if (ti_clk_get_reg_addr(node, 3, &dd->autoidle_reg))
			goto cleanup;

		ssc_clk_index = 4;
	} else {
		ssc_clk_index = 3;
	}

	if (dd->ssc_deltam_int_mask && dd->ssc_deltam_frac_mask &&
	    dd->ssc_modfreq_mant_mask && dd->ssc_modfreq_exp_mask) {
		if (ti_clk_get_reg_addr(node, ssc_clk_index++,
					&dd->ssc_deltam_reg))
			goto cleanup;

		if (ti_clk_get_reg_addr(node, ssc_clk_index++,
					&dd->ssc_modfreq_reg))
			goto cleanup;

		of_property_read_u32(node, "ti,ssc-modfreq-hz",
				     &dd->ssc_modfreq);
		of_property_read_u32(node, "ti,ssc-deltam", &dd->ssc_deltam);
		dd->ssc_downspread =
			of_property_read_bool(node, "ti,ssc-downspread");
	}

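	/*
	 * Optional DT properties can restrict the allowed DPLL modes and
	 * raise the minimum divider above the template default.
	 */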
	if (of_property_read_bool(node, "ti,low-power-stop"))
		dpll_mode |= 1 << DPLL_LOW_POWER_STOP;

	if (of_property_read_bool(node, "ti,low-power-bypass"))
		dpll_mode |= 1 << DPLL_LOW_POWER_BYPASS;

	if (of_property_read_bool(node, "ti,lock"))
		dpll_mode |= 1 << DPLL_LOCKED;

	if (!of_property_read_u32(node, "ti,min-div", &min_div) &&
	    min_div > dd->min_divider)
		dd->min_divider = min_div;

	if (dpll_mode)
		dd->modes = dpll_mode;

	_register_dpll(&clk_hw->hw, node);
	return;

cleanup:
	kfree(dd);
	kfree(parent_names);
	kfree(init);
	kfree(clk_hw);
}

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX)
static void __init of_ti_omap4_dpll_x2_setup(struct device_node *node)
{
	_register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx);
}
CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
	       of_ti_omap4_dpll_x2_setup);
#endif

#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
static void __init of_ti_am3_dpll_x2_setup(struct device_node *node)
{
	_register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
}
CLK_OF_DECLARE(ti_am3_dpll_x2_clock, "ti,am3-dpll-x2-clock",
	       of_ti_am3_dpll_x2_setup);
#endif

#ifdef CONFIG_ARCH_OMAP3
static void __init of_ti_omap3_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.freqsel_mask = 0xf0,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	if ((of_machine_is_compatible("ti,omap3630") ||
	     of_machine_is_compatible("ti,omap36xx")) &&
	     of_node_name_eq(node, "dpll5_ck"))
		of_ti_dpll_setup(node, &omap3_dpll5_ck_ops, &dd);
	else
		of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_dpll_clock, "ti,omap3-dpll-clock",
	       of_ti_omap3_dpll_setup);

static void __init of_ti_omap3_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 16,
		.div1_mask = 0x7f << 8,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.freqsel_mask = 0xf0,
	};

	of_ti_dpll_setup(node, &omap3_dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_core_dpll_clock, "ti,omap3-dpll-core-clock",
	       of_ti_omap3_core_dpll_setup);

static void __init of_ti_omap3_per_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1 << 1,
		.enable_mask = 0x7 << 16,
		.autoidle_mask = 0x7 << 3,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.freqsel_mask = 0xf00000,
		.modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_per_dpll_clock, "ti,omap3-dpll-per-clock",
	       of_ti_omap3_per_dpll_setup);

static void __init of_ti_omap3_per_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1 << 1,
		.enable_mask = 0x7 << 16,
		.autoidle_mask = 0x7 << 3,
		.mult_mask = 0xfff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 4095,
		.max_divider = 128,
		.min_divider = 1,
		.sddiv_mask = 0xff << 24,
		.dco_mask = 0xe << 20,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_per_jtype_dpll_clock, "ti,omap3-dpll-per-j-type-clock",
	       of_ti_omap3_per_jtype_dpll_setup);
#endif

static void __init of_ti_omap4_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_dpll_clock, "ti,omap4-dpll-clock",
	       of_ti_omap4_dpll_setup);
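
/*
 * A minimal, purely illustrative binding sketch for the compatibles in this
 * file (node name, phandles and offsets are hypothetical, not taken from a
 * real dtsi). The reg entries follow the order read by of_ti_dpll_setup():
 * control, idlest, mult-div1; the two clocks are clk-ref and clk-bypass:
 *
 *	dpll_example_ck: clock {
 *		#clock-cells = <0>;
 *		compatible = "ti,omap4-dpll-clock";
 *		clocks = <&sys_clkin_ck>, <&dpll_example_byp_mux>;
 *		reg = <0x0160>, <0x0164>, <0x016c>;
 *	};
 */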

static void __init of_ti_omap5_mpu_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.dcc_mask = BIT(22),
		.dcc_rate = 1400000000, /* DCC beyond 1.4GHz */
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(of_ti_omap5_mpu_dpll_clock, "ti,omap5-mpu-dpll-clock",
	       of_ti_omap5_mpu_dpll_setup);

static void __init of_ti_omap4_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_core_dpll_clock, "ti,omap4-dpll-core-clock",
	       of_ti_omap4_core_dpll_setup);

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX)
static void __init of_ti_omap4_m4xen_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.m4xen_mask = 0x800,
		.lpmode_mask = 1 << 10,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_m4xen_dpll_clock, "ti,omap4-dpll-m4xen-clock",
	       of_ti_omap4_m4xen_dpll_setup);

static void __init of_ti_omap4_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0xfff << 8,
		.div1_mask = 0xff,
		.max_multiplier = 4095,
		.max_divider = 256,
		.min_divider = 1,
		.sddiv_mask = 0xff << 24,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_jtype_dpll_clock, "ti,omap4-dpll-j-type-clock",
	       of_ti_omap4_jtype_dpll_setup);
#endif

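/*
 * The AM3-family DPLL variants below that define the ssc_* masks support
 * spread spectrum clocking; the matching register addresses and the
 * ti,ssc-* properties are parsed in of_ti_dpll_setup() above.
 */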
static void __init of_ti_am3_no_gate_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.ssc_enable_mask = 0x1 << 12,
		.ssc_downspread_mask = 0x1 << 14,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.ssc_deltam_int_mask = 0x3 << 18,
		.ssc_deltam_frac_mask = 0x3ffff,
		.ssc_modfreq_mant_mask = 0x7f,
		.ssc_modfreq_exp_mask = 0x7 << 8,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.max_rate = 1000000000,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_no_gate_dpll_clock, "ti,am3-dpll-no-gate-clock",
	       of_ti_am3_no_gate_dpll_setup);

static void __init of_ti_am3_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 4095,
		.max_divider = 256,
		.min_divider = 2,
		.flags = DPLL_J_TYPE,
		.max_rate = 2000000000,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_jtype_dpll_clock, "ti,am3-dpll-j-type-clock",
	       of_ti_am3_jtype_dpll_setup);

static void __init of_ti_am3_no_gate_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.max_rate = 2000000000,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_no_gate_jtype_dpll_clock,
	       "ti,am3-dpll-no-gate-j-type-clock",
	       of_ti_am3_no_gate_jtype_dpll_setup);

static void __init of_ti_am3_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.ssc_enable_mask = 0x1 << 12,
		.ssc_downspread_mask = 0x1 << 14,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.ssc_deltam_int_mask = 0x3 << 18,
		.ssc_deltam_frac_mask = 0x3ffff,
		.ssc_modfreq_mant_mask = 0x7f,
		.ssc_modfreq_exp_mask = 0x7 << 8,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.max_rate = 1000000000,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_dpll_clock, "ti,am3-dpll-clock", of_ti_am3_dpll_setup);

static void __init of_ti_am3_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.max_rate = 1000000000,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_core_dpll_clock, "ti,am3-dpll-core-clock",
	       of_ti_am3_core_dpll_setup);

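/*
 * OMAP2 core DPLL: there is no idlest register or mask, so
 * of_ti_dpll_setup() takes the two-register (control + mult-div1)
 * path above.
 */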
static void __init of_ti_omap2_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.enable_mask = 0x3,
		.mult_mask = 0x3ff << 12,
		.div1_mask = 0xf << 8,
		.max_divider = 16,
		.min_divider = 1,
	};

	of_ti_dpll_setup(node, &omap2_dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap2_core_dpll_clock, "ti,omap2-dpll-core-clock",
	       of_ti_omap2_core_dpll_setup);