xref: /openbmc/linux/drivers/clk/ti/clkctrl.c (revision 462cd772)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP clkctrl clock support
 *
 * Copyright (C) 2017 Texas Instruments, Inc.
 *
 * Tero Kristo <t-kristo@ti.com>
 */

#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/ti.h>
#include <linux/delay.h>
#include <linux/timekeeping.h>
#include "clock.h"

#define NO_IDLEST			0

#define OMAP4_MODULEMODE_MASK		0x3

#define MODULEMODE_HWCTRL		0x1
#define MODULEMODE_SWCTRL		0x2

#define OMAP4_IDLEST_MASK		(0x3 << 16)
#define OMAP4_IDLEST_SHIFT		16

#define OMAP4_STBYST_MASK		BIT(18)
#define OMAP4_STBYST_SHIFT		18

#define CLKCTRL_IDLEST_FUNCTIONAL	0x0
#define CLKCTRL_IDLEST_INTERFACE_IDLE	0x2
#define CLKCTRL_IDLEST_DISABLED		0x3

/* These timeouts are in us */
#define OMAP4_MAX_MODULE_READY_TIME	2000
#define OMAP4_MAX_MODULE_DISABLE_TIME	5000

static bool _early_timeout = true;

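/*
 * Book-keeping for a single clkctrl instance: the ioremapped register
 * base, the list of clocks registered against it, and the default
 * clockdomain name derived for the instance.
 */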
struct omap_clkctrl_provider {
	void __iomem *base;
	struct list_head clocks;
	char *clkdm_name;
};

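/* Lookup entry for one registered clock, keyed by (reg_offset, bit_offset) */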
struct omap_clkctrl_clk {
	struct clk_hw *clk;
	u16 reg_offset;
	int bit_offset;
	struct list_head node;
};

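/*
 * Timeout book-keeping: a plain busy-wait cycle count is used while
 * timekeeping is unavailable (early boot, suspend/resume), a ktime_t
 * start stamp otherwise. See _omap4_is_timeout() below.
 */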
union omap4_timeout {
	u32 cycles;
	ktime_t start;
};

static const struct omap_clkctrl_data default_clkctrl_data[] __initconst = {
	{ 0 },
};

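/* Extract the IDLEST field from a CM_*_CLKCTRL register value */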
static u32 _omap4_idlest(u32 val)
{
	val &= OMAP4_IDLEST_MASK;
	val >>= OMAP4_IDLEST_SHIFT;

	return val;
}

static bool _omap4_is_idle(u32 val)
{
	val = _omap4_idlest(val);

	return val == CLKCTRL_IDLEST_DISABLED;
}

static bool _omap4_is_ready(u32 val)
{
	val = _omap4_idlest(val);

	return val == CLKCTRL_IDLEST_FUNCTIONAL ||
	       val == CLKCTRL_IDLEST_INTERFACE_IDLE;
}

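/*
 * Poll helper: returns true once roughly @timeout microseconds have
 * elapsed since the first call for the given @time cookie, false
 * otherwise. The cookie must be zero-initialized by the caller.
 */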
static bool _omap4_is_timeout(union omap4_timeout *time, u32 timeout)
{
	/*
	 * There are two special cases where ktime_to_ns() can't be
	 * used to track the timeouts. First one is during early boot
	 * when the timers haven't been initialized yet. The second
	 * one is during suspend-resume cycle while timekeeping is
	 * being suspended / resumed. Clocksource for the system
	 * can be from a timer that requires pm_runtime access, which
	 * will eventually bring us here with timekeeping_suspended,
	 * during both suspend entry and resume paths. This happens
	 * at least on the am43xx platform. Account for the flakiness
	 * of udelay() by multiplying the timeout value by 2.
	 */
	if (unlikely(_early_timeout || timekeeping_suspended)) {
		if (time->cycles++ < timeout) {
			udelay(1 * 2);
			return false;
		}
	} else {
		if (!ktime_to_ns(time->start)) {
			time->start = ktime_get();
			return false;
		}

		if (ktime_us_delta(ktime_get(), time->start) < timeout) {
			cpu_relax();
			return false;
		}
	}

	return true;
}

static int __init _omap4_disable_early_timeout(void)
{
	_early_timeout = false;

	return 0;
}
arch_initcall(_omap4_disable_early_timeout);

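/*
 * Module enable: enable the clockdomain first if one is associated,
 * then program MODULEMODE and, unless the NO_IDLEST flag is set, poll
 * IDLEST until the module reports functional (or interface-idle) state
 * or the enable times out, in which case -EBUSY is returned.
 */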
static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;
	int ret;
	union omap4_timeout timeout = { 0 };

	if (clk->clkdm) {
		ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
		if (ret) {
			WARN(1,
			     "%s: could not enable %s's clockdomain %s: %d\n",
			     __func__, clk_hw_get_name(hw),
			     clk->clkdm_name, ret);
			return ret;
		}
	}

	if (!clk->enable_bit)
		return 0;

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	val &= ~OMAP4_MODULEMODE_MASK;
	val |= clk->enable_bit;

	ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);

	if (test_bit(NO_IDLEST, &clk->flags))
		return 0;

	/* Wait until module is enabled */
	while (!_omap4_is_ready(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
		if (_omap4_is_timeout(&timeout, OMAP4_MAX_MODULE_READY_TIME)) {
			pr_err("%s: failed to enable\n", clk_hw_get_name(hw));
			return -EBUSY;
		}
	}

	return 0;
}

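/*
 * Module disable: clear MODULEMODE, optionally wait for the module to
 * report disabled via IDLEST (logging an error on timeout), and
 * finally release the clockdomain.
 */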
static void _omap4_clkctrl_clk_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;
	union omap4_timeout timeout = { 0 };

	if (!clk->enable_bit)
		goto exit;

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	val &= ~OMAP4_MODULEMODE_MASK;

	ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);

	if (test_bit(NO_IDLEST, &clk->flags))
		goto exit;

	/* Wait until module is disabled */
	while (!_omap4_is_idle(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
		if (_omap4_is_timeout(&timeout,
				      OMAP4_MAX_MODULE_DISABLE_TIME)) {
			pr_err("%s: failed to disable\n", clk_hw_get_name(hw));
			break;
		}
	}

exit:
	if (clk->clkdm)
		ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
}

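/* The module is reported enabled if its MODULEMODE enable bit is set */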
static int _omap4_clkctrl_clk_is_enabled(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	if (val & clk->enable_bit)
		return 1;

	return 0;
}

static const struct clk_ops omap4_clkctrl_clk_ops = {
	.enable		= _omap4_clkctrl_clk_enable,
	.disable	= _omap4_clkctrl_clk_disable,
	.is_enabled	= _omap4_clkctrl_clk_is_enabled,
	.init		= omap2_init_clk_clkdm,
};

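/*
 * Translate a two-cell clock specifier (clkctrl register offset, bit
 * offset) into the matching clk_hw on the provider's clock list, e.g.
 * a consumer entry such as "clocks = <&l4ls_clkctrl 0x38 0>;" (the
 * phandle and numbers here are purely illustrative).
 */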
static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
					      void *data)
{
	struct omap_clkctrl_provider *provider = data;
	struct omap_clkctrl_clk *entry = NULL, *iter;

	if (clkspec->args_count != 2)
		return ERR_PTR(-EINVAL);

	pr_debug("%s: looking for %x:%x\n", __func__,
		 clkspec->args[0], clkspec->args[1]);

	list_for_each_entry(iter, &provider->clocks, node) {
		if (iter->reg_offset == clkspec->args[0] &&
		    iter->bit_offset == clkspec->args[1]) {
			entry = iter;
			break;
		}
	}

	if (!entry)
		return ERR_PTR(-EINVAL);

	return entry->clk;
}

/* Get clkctrl clock base name based on clkctrl_name or the DT node */
static const char * __init clkctrl_get_clock_name(struct device_node *np,
						  const char *clkctrl_name,
						  int offset, int index,
						  bool legacy_naming)
{
	char *clock_name;

	/* l4per-clkctrl:1234:0 style naming based on clkctrl_name */
	if (clkctrl_name && !legacy_naming) {
		clock_name = kasprintf(GFP_KERNEL, "%s-clkctrl:%04x:%d",
				       clkctrl_name, offset, index);
		strreplace(clock_name, '_', '-');

		return clock_name;
	}

	/* l4per_cm:clk:1234:0 old style naming based on clkctrl_name */
	if (clkctrl_name)
		return kasprintf(GFP_KERNEL, "%s_cm:clk:%04x:%d",
				 clkctrl_name, offset, index);

	/* l4per_cm:clk:1234:0 old style naming based on the parent node name */
	if (legacy_naming)
		return kasprintf(GFP_KERNEL, "%pOFn:clk:%04x:%d",
				 np->parent, offset, index);

	/* l4per-clkctrl:1234:0 style naming based on node name */
	return kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", np, offset, index);
}

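/*
 * Register a single child clock (gate, mux or divider) for a clkctrl
 * register and add it to the provider's lookup list. On failure the
 * generated name and the list entry are freed; the caller is
 * responsible for freeing the clk_hw itself.
 */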
static int __init
_ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider,
			 struct device_node *node, struct clk_hw *clk_hw,
			 u16 offset, u8 bit, const char * const *parents,
			 int num_parents, const struct clk_ops *ops,
			 const char *clkctrl_name)
{
	struct clk_init_data init = { NULL };
	struct clk *clk;
	struct omap_clkctrl_clk *clkctrl_clk;
	int ret = 0;

	init.name = clkctrl_get_clock_name(node, clkctrl_name, offset, bit,
					   ti_clk_get_features()->flags &
					   TI_CLK_CLKCTRL_COMPAT);

	clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
	if (!init.name || !clkctrl_clk) {
		ret = -ENOMEM;
		goto cleanup;
	}

	clk_hw->init = &init;
	init.parent_names = parents;
	init.num_parents = num_parents;
	init.ops = ops;
	init.flags = 0;

	clk = ti_clk_register(NULL, clk_hw, init.name);
	if (IS_ERR_OR_NULL(clk)) {
		ret = -EINVAL;
		goto cleanup;
	}

	clkctrl_clk->reg_offset = offset;
	clkctrl_clk->bit_offset = bit;
	clkctrl_clk->clk = clk_hw;

	list_add(&clkctrl_clk->node, &provider->clocks);

	return 0;

cleanup:
	kfree(init.name);
	kfree(clkctrl_clk);
	return ret;
}

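/* Set up a single-bit gate clock contained within a clkctrl register */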
static void __init
_ti_clkctrl_setup_gate(struct omap_clkctrl_provider *provider,
		       struct device_node *node, u16 offset,
		       const struct omap_clkctrl_bit_data *data,
		       void __iomem *reg, const char *clkctrl_name)
{
	struct clk_hw_omap *clk_hw;

	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	if (!clk_hw)
		return;

	clk_hw->enable_bit = data->bit;
	clk_hw->enable_reg.ptr = reg;

	if (_ti_clkctrl_clk_register(provider, node, &clk_hw->hw, offset,
				     data->bit, data->parents, 1,
				     &omap_gate_clk_ops, clkctrl_name))
		kfree(clk_hw);
}

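/*
 * Set up a mux clock contained within a clkctrl register. The mux mask
 * is derived from the number of parents, rounded up to a full
 * power-of-two field width.
 */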
static void __init
_ti_clkctrl_setup_mux(struct omap_clkctrl_provider *provider,
		      struct device_node *node, u16 offset,
		      const struct omap_clkctrl_bit_data *data,
		      void __iomem *reg, const char *clkctrl_name)
{
	struct clk_omap_mux *mux;
	int num_parents = 0;
	const char * const *pname;

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return;

	pname = data->parents;
	while (*pname) {
		num_parents++;
		pname++;
	}

	mux->mask = num_parents;
	if (!(mux->flags & CLK_MUX_INDEX_ONE))
		mux->mask--;

	mux->mask = (1 << fls(mux->mask)) - 1;

	mux->shift = data->bit;
	mux->reg.ptr = reg;

	if (_ti_clkctrl_clk_register(provider, node, &mux->hw, offset,
				     data->bit, data->parents, num_parents,
				     &ti_clk_mux_ops, clkctrl_name))
		kfree(mux);
}

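/*
 * Set up a divider clock contained within a clkctrl register, parsing
 * the divider table (or power-of-two range) from the provided data.
 */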
static void __init
_ti_clkctrl_setup_div(struct omap_clkctrl_provider *provider,
		      struct device_node *node, u16 offset,
		      const struct omap_clkctrl_bit_data *data,
		      void __iomem *reg, const char *clkctrl_name)
{
	struct clk_omap_divider *div;
	const struct omap_clkctrl_div_data *div_data = data->data;
	u8 div_flags = 0;

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		return;

	div->reg.ptr = reg;
	div->shift = data->bit;
	div->flags = div_data->flags;

	if (div->flags & CLK_DIVIDER_POWER_OF_TWO)
		div_flags |= CLKF_INDEX_POWER_OF_TWO;

	if (ti_clk_parse_divider_data((int *)div_data->dividers, 0,
				      div_data->max_div, div_flags,
				      div)) {
		pr_err("%s: Data parsing for %pOF:%04x:%d failed\n", __func__,
		       node, offset, data->bit);
		kfree(div);
		return;
	}

	if (_ti_clkctrl_clk_register(provider, node, &div->hw, offset,
				     data->bit, data->parents, 1,
				     &ti_clk_divider_ops, clkctrl_name))
		kfree(div);
}

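/*
 * Walk the optional per-register bit data (terminated by a zero bit
 * field) and register the gate, divider and mux subclocks it describes.
 */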
static void __init
_ti_clkctrl_setup_subclks(struct omap_clkctrl_provider *provider,
			  struct device_node *node,
			  const struct omap_clkctrl_reg_data *data,
			  void __iomem *reg, const char *clkctrl_name)
{
	const struct omap_clkctrl_bit_data *bits = data->bit_data;

	if (!bits)
		return;

	while (bits->bit) {
		switch (bits->type) {
		case TI_CLK_GATE:
			_ti_clkctrl_setup_gate(provider, node, data->offset,
					       bits, reg, clkctrl_name);
			break;

		case TI_CLK_DIVIDER:
			_ti_clkctrl_setup_div(provider, node, data->offset,
					      bits, reg, clkctrl_name);
			break;

		case TI_CLK_MUX:
			_ti_clkctrl_setup_mux(provider, node, data->offset,
					      bits, reg, clkctrl_name);
			break;

		default:
			pr_err("%s: bad subclk type: %d\n", __func__,
			       bits->type);
			return;
		}
		bits++;
	}
}

static void __init _clkctrl_add_provider(void *data,
					 struct device_node *np)
{
	of_clk_add_hw_provider(np, _ti_omap4_clkctrl_xlate, data);
}

/*
 * Get clock name based on "clock-output-names" property or the
 * compatible property for clkctrl.
 */
static const char * __init clkctrl_get_name(struct device_node *np)
{
	struct property *prop;
	const int prefix_len = 11;
	const char *compat;
	const char *output;
	char *name;

	if (!of_property_read_string_index(np, "clock-output-names", 0,
					   &output)) {
		const char *end;
		int len;

		len = strlen(output);
		end = strstr(output, "_clkctrl");
		if (end)
			len -= strlen(end);
		name = kstrndup(output, len, GFP_KERNEL);

		return name;
	}

	of_property_for_each_string(np, "compatible", prop, compat) {
		if (!strncmp("ti,clkctrl-", compat, prefix_len)) {
			/* Two-letter minimum name length for l3, l4 etc. */
			if (strnlen(compat + prefix_len, 16) < 2)
				continue;
			name = kasprintf(GFP_KERNEL, "%s", compat + prefix_len);
			if (!name)
				continue;
			strreplace(name, '-', '_');

			return name;
		}
	}

	return NULL;
}

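/*
 * Main setup for a "ti,clkctrl" node: match the node's address against
 * the SoC-specific clkctrl data, derive the clockdomain name, register
 * the module (MODULEMODE) clocks plus any gate/mux/divider subclocks,
 * and finally register the node as a clock provider.
 */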
static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
{
	struct omap_clkctrl_provider *provider;
	const struct omap_clkctrl_data *data = default_clkctrl_data;
	const struct omap_clkctrl_reg_data *reg_data;
	struct clk_init_data init = { NULL };
	struct clk_hw_omap *hw;
	struct clk *clk;
	struct omap_clkctrl_clk *clkctrl_clk = NULL;
	const __be32 *addrp;
	bool legacy_naming;
	const char *clkctrl_name;
	u32 addr;
	int ret;
	char *c;
	u16 soc_mask = 0;

	addrp = of_get_address(node, 0, NULL, NULL);
	addr = (u32)of_translate_address(node, addrp);

#ifdef CONFIG_ARCH_OMAP4
	if (of_machine_is_compatible("ti,omap4"))
		data = omap4_clkctrl_data;
#endif
#ifdef CONFIG_SOC_OMAP5
	if (of_machine_is_compatible("ti,omap5"))
		data = omap5_clkctrl_data;
#endif
#ifdef CONFIG_SOC_DRA7XX
	if (of_machine_is_compatible("ti,dra7"))
		data = dra7_clkctrl_data;
	if (of_machine_is_compatible("ti,dra72"))
		soc_mask = CLKF_SOC_DRA72;
	if (of_machine_is_compatible("ti,dra74"))
		soc_mask = CLKF_SOC_DRA74;
	if (of_machine_is_compatible("ti,dra76"))
		soc_mask = CLKF_SOC_DRA76;
#endif
#ifdef CONFIG_SOC_AM33XX
	if (of_machine_is_compatible("ti,am33xx"))
		data = am3_clkctrl_data;
#endif
#ifdef CONFIG_SOC_AM43XX
	if (of_machine_is_compatible("ti,am4372"))
		data = am4_clkctrl_data;

	if (of_machine_is_compatible("ti,am438x"))
		data = am438x_clkctrl_data;
#endif
#ifdef CONFIG_SOC_TI81XX
	if (of_machine_is_compatible("ti,dm814"))
		data = dm814_clkctrl_data;

	if (of_machine_is_compatible("ti,dm816"))
		data = dm816_clkctrl_data;
#endif

	if (ti_clk_get_features()->flags & TI_CLK_DEVICE_TYPE_GP)
		soc_mask |= CLKF_SOC_NONSEC;

	while (data->addr) {
		if (addr == data->addr)
			break;

		data++;
	}

	if (!data->addr) {
		pr_err("%pOF not found from clkctrl data.\n", node);
		return;
	}

	provider = kzalloc(sizeof(*provider), GFP_KERNEL);
	if (!provider)
		return;

	provider->base = of_iomap(node, 0);

	legacy_naming = ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT;
	clkctrl_name = clkctrl_get_name(node);
	if (clkctrl_name) {
		provider->clkdm_name = kasprintf(GFP_KERNEL,
						 "%s_clkdm", clkctrl_name);
		goto clkdm_found;
	}

	/*
	 * The code below can be removed when all clkctrl nodes use a
	 * domain-specific compatible property and standard clock node naming
	 */
	if (legacy_naming) {
		provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFnxxx", node->parent);
		if (!provider->clkdm_name) {
			kfree(provider);
			return;
		}

		/*
		 * Create default clkdm name, replace _cm from end of parent
		 * node name with _clkdm
		 */
		provider->clkdm_name[strlen(provider->clkdm_name) - 5] = 0;
	} else {
		provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFn", node);
		if (!provider->clkdm_name) {
			kfree(provider);
			return;
		}

		/*
		 * Create default clkdm name, replace _clkctrl from end of
		 * node name with _clkdm
		 */
		provider->clkdm_name[strlen(provider->clkdm_name) - 7] = 0;
	}

	strcat(provider->clkdm_name, "clkdm");

	/* Replace any dashes in the clkdm name with underscores */
	c = provider->clkdm_name;

	while (*c) {
		if (*c == '-')
			*c = '_';
		c++;
	}
clkdm_found:
	INIT_LIST_HEAD(&provider->clocks);

	/* Generate clocks */
	reg_data = data->regs;

	while (reg_data->parent) {
		if ((reg_data->flags & CLKF_SOC_MASK) &&
		    (reg_data->flags & soc_mask) == 0) {
			reg_data++;
			continue;
		}

		hw = kzalloc(sizeof(*hw), GFP_KERNEL);
		if (!hw)
			return;

		hw->enable_reg.ptr = provider->base + reg_data->offset;

		_ti_clkctrl_setup_subclks(provider, node, reg_data,
					  hw->enable_reg.ptr, clkctrl_name);

		if (reg_data->flags & CLKF_SW_SUP)
			hw->enable_bit = MODULEMODE_SWCTRL;
		if (reg_data->flags & CLKF_HW_SUP)
			hw->enable_bit = MODULEMODE_HWCTRL;
		if (reg_data->flags & CLKF_NO_IDLEST)
			set_bit(NO_IDLEST, &hw->flags);

		if (reg_data->clkdm_name)
			hw->clkdm_name = reg_data->clkdm_name;
		else
			hw->clkdm_name = provider->clkdm_name;

		init.parent_names = &reg_data->parent;
		init.num_parents = 1;
		init.flags = 0;
		if (reg_data->flags & CLKF_SET_RATE_PARENT)
			init.flags |= CLK_SET_RATE_PARENT;

		init.name = clkctrl_get_clock_name(node, clkctrl_name,
						   reg_data->offset, 0,
						   legacy_naming);
		if (!init.name)
			goto cleanup;

		clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
		if (!clkctrl_clk)
			goto cleanup;

		init.ops = &omap4_clkctrl_clk_ops;
		hw->hw.init = &init;

		clk = ti_clk_register_omap_hw(NULL, &hw->hw, init.name);
		if (IS_ERR_OR_NULL(clk))
			goto cleanup;

		clkctrl_clk->reg_offset = reg_data->offset;
		clkctrl_clk->clk = &hw->hw;

		list_add(&clkctrl_clk->node, &provider->clocks);

		reg_data++;
	}

	ret = of_clk_add_hw_provider(node, _ti_omap4_clkctrl_xlate, provider);
	if (ret == -EPROBE_DEFER)
		ti_clk_retry_init(node, provider, _clkctrl_add_provider);

	kfree(clkctrl_name);

	return;

cleanup:
	kfree(hw);
	kfree(init.name);
	kfree(clkctrl_name);
	kfree(clkctrl_clk);
}
CLK_OF_DECLARE(ti_omap4_clkctrl_clock, "ti,clkctrl",
	       _ti_omap4_clkctrl_setup);

/**
 * ti_clk_is_in_standby - check if a clkctrl clock is in standby
 * @clk: clock to check the standby status for
 *
 * Checks whether the provided clock is currently in standby. Returns
 * true if the clock is a clkctrl type clock and its standby status
 * (STBYST) bit is set, false otherwise.
 */
bool ti_clk_is_in_standby(struct clk *clk)
{
	struct clk_hw *hw;
	struct clk_hw_omap *hwclk;
	u32 val;

	hw = __clk_get_hw(clk);

	if (!omap2_clk_is_hw_omap(hw))
		return false;

	hwclk = to_clk_hw_omap(hw);

	val = ti_clk_ll_ops->clk_readl(&hwclk->enable_reg);

	if (val & OMAP4_STBYST_MASK)
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(ti_clk_is_in_standby);
743