/*
 * OMAP clkctrl clock support
 *
 * Copyright (C) 2017 Texas Instruments, Inc.
 *
 * Tero Kristo <t-kristo@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/ti.h>
#include <linux/delay.h>
#include <linux/timekeeping.h>
#include "clock.h"

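/*
 * NO_IDLEST is a bit index within clk_hw_omap->flags; when it is set for a
 * module clock, the enable/disable paths skip polling the IDLEST field.
 */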
#define NO_IDLEST			0

#define OMAP4_MODULEMODE_MASK		0x3

#define MODULEMODE_HWCTRL		0x1
#define MODULEMODE_SWCTRL		0x2

#define OMAP4_IDLEST_MASK		(0x3 << 16)
#define OMAP4_IDLEST_SHIFT		16

#define OMAP4_STBYST_MASK		BIT(18)
#define OMAP4_STBYST_SHIFT		18

#define CLKCTRL_IDLEST_FUNCTIONAL	0x0
#define CLKCTRL_IDLEST_INTERFACE_IDLE	0x2
#define CLKCTRL_IDLEST_DISABLED		0x3

/* These timeouts are in us */
#define OMAP4_MAX_MODULE_READY_TIME	2000
#define OMAP4_MAX_MODULE_DISABLE_TIME	5000

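/*
 * While _early_timeout is set, timeouts are tracked by counting udelay()
 * iterations instead of using ktime; it is cleared from an arch_initcall
 * once timekeeping is usable (see _omap4_disable_early_timeout() below).
 */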
static bool _early_timeout = true;

struct omap_clkctrl_provider {
	void __iomem *base;
	struct list_head clocks;
	char *clkdm_name;
};

struct omap_clkctrl_clk {
	struct clk_hw *clk;
	u16 reg_offset;
	int bit_offset;
	struct list_head node;
};

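/*
 * Timeout bookkeeping for _omap4_is_timeout(): either a plain iteration
 * count (early boot / timekeeping suspended) or a ktime start stamp.
 */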
union omap4_timeout {
	u32 cycles;
	ktime_t start;
};

static const struct omap_clkctrl_data default_clkctrl_data[] __initconst = {
	{ 0 },
};

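/* Helpers for decoding the IDLEST field of a CM_*_CLKCTRL register value */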
static u32 _omap4_idlest(u32 val)
{
	val &= OMAP4_IDLEST_MASK;
	val >>= OMAP4_IDLEST_SHIFT;

	return val;
}

static bool _omap4_is_idle(u32 val)
{
	val = _omap4_idlest(val);

	return val == CLKCTRL_IDLEST_DISABLED;
}

static bool _omap4_is_ready(u32 val)
{
	val = _omap4_idlest(val);

	return val == CLKCTRL_IDLEST_FUNCTIONAL ||
	       val == CLKCTRL_IDLEST_INTERFACE_IDLE;
}

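/*
 * Returns true once roughly @timeout microseconds have elapsed; see the
 * comment below for why two different tracking methods are needed.
 */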
static bool _omap4_is_timeout(union omap4_timeout *time, u32 timeout)
{
	/*
	 * There are two special cases where ktime_to_ns() can't be
	 * used to track the timeouts. First one is during early boot
	 * when the timers haven't been initialized yet. The second
	 * one is during suspend-resume cycle while timekeeping is
	 * being suspended / resumed. Clocksource for the system
	 * can be from a timer that requires pm_runtime access, which
	 * will eventually bring us here with timekeeping_suspended,
	 * during both suspend entry and resume paths. This happens
	 * at least on am43xx platform. Account for flakiness
	 * with udelay() by multiplying the timeout value by 2.
	 */
	if (unlikely(_early_timeout || timekeeping_suspended)) {
		if (time->cycles++ < timeout) {
			udelay(1 * 2);
			return false;
		}
	} else {
		if (!ktime_to_ns(time->start)) {
			time->start = ktime_get();
			return false;
		}

		if (ktime_us_delta(ktime_get(), time->start) < timeout) {
			cpu_relax();
			return false;
		}
	}

	return true;
}

static int __init _omap4_disable_early_timeout(void)
{
	_early_timeout = false;

	return 0;
}
arch_initcall(_omap4_disable_early_timeout);

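/*
 * Enable a clkctrl module clock: wake up its clockdomain (if any), program
 * MODULEMODE, then poll IDLEST until the module reports functional or
 * interface-idle, unless NO_IDLEST is set.
 */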
static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;
	int ret;
	union omap4_timeout timeout = { 0 };

	if (clk->clkdm) {
		ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
		if (ret) {
			WARN(1,
			     "%s: could not enable %s's clockdomain %s: %d\n",
			     __func__, clk_hw_get_name(hw),
			     clk->clkdm_name, ret);
			return ret;
		}
	}

	if (!clk->enable_bit)
		return 0;

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	val &= ~OMAP4_MODULEMODE_MASK;
	val |= clk->enable_bit;

	ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);

	if (test_bit(NO_IDLEST, &clk->flags))
		return 0;

	/* Wait until module is enabled */
	while (!_omap4_is_ready(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
		if (_omap4_is_timeout(&timeout, OMAP4_MAX_MODULE_READY_TIME)) {
			pr_err("%s: failed to enable\n", clk_hw_get_name(hw));
			return -EBUSY;
		}
	}

	return 0;
}

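/*
 * Disable a clkctrl module clock: clear MODULEMODE, wait for the module to
 * report disabled (unless NO_IDLEST is set), then let the clockdomain idle.
 */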
static void _omap4_clkctrl_clk_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;
	union omap4_timeout timeout = { 0 };

	if (!clk->enable_bit)
		goto exit;

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	val &= ~OMAP4_MODULEMODE_MASK;

	ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);

	if (test_bit(NO_IDLEST, &clk->flags))
		goto exit;

	/* Wait until module is disabled */
	while (!_omap4_is_idle(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
		if (_omap4_is_timeout(&timeout,
				      OMAP4_MAX_MODULE_DISABLE_TIME)) {
			pr_err("%s: failed to disable\n", clk_hw_get_name(hw));
			break;
		}
	}

exit:
	if (clk->clkdm)
		ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
}

static int _omap4_clkctrl_clk_is_enabled(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	if (val & clk->enable_bit)
		return 1;

	return 0;
}

static const struct clk_ops omap4_clkctrl_clk_ops = {
	.enable		= _omap4_clkctrl_clk_enable,
	.disable	= _omap4_clkctrl_clk_disable,
	.is_enabled	= _omap4_clkctrl_clk_is_enabled,
	.init		= omap2_init_clk_clkdm,
};

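/*
 * Translate a two-cell clock specifier (clkctrl register offset, bit offset)
 * into the clk_hw registered for it. An illustrative consumer reference in a
 * device tree (node and numbers below are hypothetical) would look like:
 *
 *	clocks = <&l4ls_clkctrl 0x20 0>;
 */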
static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
					      void *data)
{
	struct omap_clkctrl_provider *provider = data;
	struct omap_clkctrl_clk *entry;
	bool found = false;

	if (clkspec->args_count != 2)
		return ERR_PTR(-EINVAL);

	pr_debug("%s: looking for %x:%x\n", __func__,
		 clkspec->args[0], clkspec->args[1]);

	list_for_each_entry(entry, &provider->clocks, node) {
		if (entry->reg_offset == clkspec->args[0] &&
		    entry->bit_offset == clkspec->args[1]) {
			found = true;
			break;
		}
	}

	if (!found)
		return ERR_PTR(-EINVAL);

	return entry->clk;
}

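/*
 * Register one sub-clock under the provider. The clock name is derived from
 * the DT node path plus register offset and bit, and the clock is added to
 * provider->clocks so that _ti_omap4_clkctrl_xlate() can find it later.
 */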
static int __init
_ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider,
			 struct device_node *node, struct clk_hw *clk_hw,
			 u16 offset, u8 bit, const char * const *parents,
			 int num_parents, const struct clk_ops *ops)
{
	struct clk_init_data init = { NULL };
	struct clk *clk;
	struct omap_clkctrl_clk *clkctrl_clk;
	int ret = 0;

	if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
		init.name = kasprintf(GFP_KERNEL, "%pOFn:%pOFn:%04x:%d",
				      node->parent, node, offset,
				      bit);
	else
		init.name = kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", node,
				      offset, bit);
	clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
	if (!init.name || !clkctrl_clk) {
		ret = -ENOMEM;
		goto cleanup;
	}

	clk_hw->init = &init;
	init.parent_names = parents;
	init.num_parents = num_parents;
	init.ops = ops;
	init.flags = 0;

	clk = ti_clk_register(NULL, clk_hw, init.name);
	if (IS_ERR_OR_NULL(clk)) {
		ret = -EINVAL;
		goto cleanup;
	}

	clkctrl_clk->reg_offset = offset;
	clkctrl_clk->bit_offset = bit;
	clkctrl_clk->clk = clk_hw;

	list_add(&clkctrl_clk->node, &provider->clocks);

	return 0;

cleanup:
	kfree(init.name);
	kfree(clkctrl_clk);
	return ret;
}

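/*
 * The three helpers below build the optional gate, mux and divider sub-clocks
 * described by a register's omap_clkctrl_bit_data table.
 */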
static void __init
_ti_clkctrl_setup_gate(struct omap_clkctrl_provider *provider,
		       struct device_node *node, u16 offset,
		       const struct omap_clkctrl_bit_data *data,
		       void __iomem *reg)
{
	struct clk_hw_omap *clk_hw;

	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	if (!clk_hw)
		return;

	clk_hw->enable_bit = data->bit;
	clk_hw->enable_reg.ptr = reg;

	if (_ti_clkctrl_clk_register(provider, node, &clk_hw->hw, offset,
				     data->bit, data->parents, 1,
				     &omap_gate_clk_ops))
		kfree(clk_hw);
}

static void __init
_ti_clkctrl_setup_mux(struct omap_clkctrl_provider *provider,
		      struct device_node *node, u16 offset,
		      const struct omap_clkctrl_bit_data *data,
		      void __iomem *reg)
{
	struct clk_omap_mux *mux;
	int num_parents = 0;
	const char * const *pname;

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return;

	pname = data->parents;
	while (*pname) {
		num_parents++;
		pname++;
	}

	mux->mask = num_parents;
	if (!(mux->flags & CLK_MUX_INDEX_ONE))
		mux->mask--;

	mux->mask = (1 << fls(mux->mask)) - 1;

	mux->shift = data->bit;
	mux->reg.ptr = reg;

	if (_ti_clkctrl_clk_register(provider, node, &mux->hw, offset,
				     data->bit, data->parents, num_parents,
				     &ti_clk_mux_ops))
		kfree(mux);
}

static void __init
_ti_clkctrl_setup_div(struct omap_clkctrl_provider *provider,
		      struct device_node *node, u16 offset,
		      const struct omap_clkctrl_bit_data *data,
		      void __iomem *reg)
{
	struct clk_omap_divider *div;
	const struct omap_clkctrl_div_data *div_data = data->data;
	u8 div_flags = 0;

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		return;

	div->reg.ptr = reg;
	div->shift = data->bit;
	div->flags = div_data->flags;

	if (div->flags & CLK_DIVIDER_POWER_OF_TWO)
		div_flags |= CLKF_INDEX_POWER_OF_TWO;

	if (ti_clk_parse_divider_data((int *)div_data->dividers, 0,
				      div_data->max_div, div_flags,
				      div)) {
		pr_err("%s: Data parsing for %pOF:%04x:%d failed\n", __func__,
		       node, offset, data->bit);
		kfree(div);
		return;
	}

	if (_ti_clkctrl_clk_register(provider, node, &div->hw, offset,
				     data->bit, data->parents, 1,
				     &ti_clk_divider_ops))
		kfree(div);
}

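/*
 * Walk the bit_data table of one clkctrl register (terminated by an entry
 * with a zero bit field) and instantiate each described sub-clock.
 */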
static void __init
_ti_clkctrl_setup_subclks(struct omap_clkctrl_provider *provider,
			  struct device_node *node,
			  const struct omap_clkctrl_reg_data *data,
			  void __iomem *reg)
{
	const struct omap_clkctrl_bit_data *bits = data->bit_data;

	if (!bits)
		return;

	while (bits->bit) {
		switch (bits->type) {
		case TI_CLK_GATE:
			_ti_clkctrl_setup_gate(provider, node, data->offset,
					       bits, reg);
			break;

		case TI_CLK_DIVIDER:
			_ti_clkctrl_setup_div(provider, node, data->offset,
					      bits, reg);
			break;

		case TI_CLK_MUX:
			_ti_clkctrl_setup_mux(provider, node, data->offset,
					      bits, reg);
			break;

		default:
			pr_err("%s: bad subclk type: %d\n", __func__,
			       bits->type);
			return;
		}
		bits++;
	}
}

static void __init _clkctrl_add_provider(void *data,
					 struct device_node *np)
{
	of_clk_add_hw_provider(np, _ti_omap4_clkctrl_xlate, data);
}

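/*
 * Main setup for a "ti,clkctrl" node: pick the SoC-specific clkctrl data
 * table by machine compatible, match this node by register address, derive a
 * default clockdomain name from the node name, then register one main module
 * clock (plus any sub-clocks) per register entry and expose everything
 * through a clk provider using the two-cell xlate above.
 */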
static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
{
	struct omap_clkctrl_provider *provider;
	const struct omap_clkctrl_data *data = default_clkctrl_data;
	const struct omap_clkctrl_reg_data *reg_data;
	struct clk_init_data init = { NULL };
	struct clk_hw_omap *hw;
	struct clk *clk;
	struct omap_clkctrl_clk *clkctrl_clk;
	const __be32 *addrp;
	u32 addr;
	int ret;
	char *c;
	u16 soc_mask = 0;

	if (!(ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) &&
	    of_node_name_eq(node, "clk"))
		ti_clk_features.flags |= TI_CLK_CLKCTRL_COMPAT;

	addrp = of_get_address(node, 0, NULL, NULL);
	addr = (u32)of_translate_address(node, addrp);

#ifdef CONFIG_ARCH_OMAP4
	if (of_machine_is_compatible("ti,omap4"))
		data = omap4_clkctrl_data;
#endif
#ifdef CONFIG_SOC_OMAP5
	if (of_machine_is_compatible("ti,omap5"))
		data = omap5_clkctrl_data;
#endif
#ifdef CONFIG_SOC_DRA7XX
	if (of_machine_is_compatible("ti,dra7")) {
		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
			data = dra7_clkctrl_compat_data;
		else
			data = dra7_clkctrl_data;
	}

	if (of_machine_is_compatible("ti,dra72"))
		soc_mask = CLKF_SOC_DRA72;
	if (of_machine_is_compatible("ti,dra74"))
		soc_mask = CLKF_SOC_DRA74;
	if (of_machine_is_compatible("ti,dra76"))
		soc_mask = CLKF_SOC_DRA76;
#endif
#ifdef CONFIG_SOC_AM33XX
	if (of_machine_is_compatible("ti,am33xx")) {
		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
			data = am3_clkctrl_compat_data;
		else
			data = am3_clkctrl_data;
	}
#endif
#ifdef CONFIG_SOC_AM43XX
	if (of_machine_is_compatible("ti,am4372")) {
		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
			data = am4_clkctrl_compat_data;
		else
			data = am4_clkctrl_data;
	}

	if (of_machine_is_compatible("ti,am438x")) {
		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
			data = am438x_clkctrl_compat_data;
		else
			data = am438x_clkctrl_data;
	}
#endif
#ifdef CONFIG_SOC_TI81XX
	if (of_machine_is_compatible("ti,dm814"))
		data = dm814_clkctrl_data;

	if (of_machine_is_compatible("ti,dm816"))
		data = dm816_clkctrl_data;
#endif

	if (ti_clk_get_features()->flags & TI_CLK_DEVICE_TYPE_GP)
		soc_mask |= CLKF_SOC_NONSEC;

	while (data->addr) {
		if (addr == data->addr)
			break;

		data++;
	}

	if (!data->addr) {
		pr_err("%pOF not found from clkctrl data.\n", node);
		return;
	}

	provider = kzalloc(sizeof(*provider), GFP_KERNEL);
	if (!provider)
		return;

	provider->base = of_iomap(node, 0);

	if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) {
		provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFnxxx", node->parent);
		if (!provider->clkdm_name) {
			kfree(provider);
			return;
		}

		/*
		 * Create default clkdm name, replace _cm from end of parent
		 * node name with _clkdm
		 */
		provider->clkdm_name[strlen(provider->clkdm_name) - 2] = 0;
	} else {
		provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFn", node);
		if (!provider->clkdm_name) {
			kfree(provider);
			return;
		}

		/*
		 * Create default clkdm name, replace _clkctrl from end of
		 * node name with _clkdm
		 */
		provider->clkdm_name[strlen(provider->clkdm_name) - 7] = 0;
	}

	strcat(provider->clkdm_name, "clkdm");

	/* Replace any dash from the clkdm name with underscore */
	c = provider->clkdm_name;

	while (*c) {
		if (*c == '-')
			*c = '_';
		c++;
	}

	INIT_LIST_HEAD(&provider->clocks);

	/* Generate clocks */
	reg_data = data->regs;

	while (reg_data->parent) {
		if ((reg_data->flags & CLKF_SOC_MASK) &&
		    (reg_data->flags & soc_mask) == 0) {
			reg_data++;
			continue;
		}

		hw = kzalloc(sizeof(*hw), GFP_KERNEL);
		if (!hw)
			return;

		hw->enable_reg.ptr = provider->base + reg_data->offset;

		_ti_clkctrl_setup_subclks(provider, node, reg_data,
					  hw->enable_reg.ptr);

		if (reg_data->flags & CLKF_SW_SUP)
			hw->enable_bit = MODULEMODE_SWCTRL;
		if (reg_data->flags & CLKF_HW_SUP)
			hw->enable_bit = MODULEMODE_HWCTRL;
		if (reg_data->flags & CLKF_NO_IDLEST)
			set_bit(NO_IDLEST, &hw->flags);

		if (reg_data->clkdm_name)
			hw->clkdm_name = reg_data->clkdm_name;
		else
			hw->clkdm_name = provider->clkdm_name;

		init.parent_names = &reg_data->parent;
		init.num_parents = 1;
		init.flags = 0;
		if (reg_data->flags & CLKF_SET_RATE_PARENT)
			init.flags |= CLK_SET_RATE_PARENT;
		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
			init.name = kasprintf(GFP_KERNEL, "%pOFn:%pOFn:%04x:%d",
					      node->parent, node,
					      reg_data->offset, 0);
		else
			init.name = kasprintf(GFP_KERNEL, "%pOFn:%04x:%d",
					      node, reg_data->offset, 0);
		clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
		if (!init.name || !clkctrl_clk)
			goto cleanup;

		init.ops = &omap4_clkctrl_clk_ops;
		hw->hw.init = &init;

		clk = ti_clk_register_omap_hw(NULL, &hw->hw, init.name);
		if (IS_ERR_OR_NULL(clk))
			goto cleanup;

		clkctrl_clk->reg_offset = reg_data->offset;
		clkctrl_clk->clk = &hw->hw;

		list_add(&clkctrl_clk->node, &provider->clocks);

		reg_data++;
	}

	ret = of_clk_add_hw_provider(node, _ti_omap4_clkctrl_xlate, provider);
	if (ret == -EPROBE_DEFER)
		ti_clk_retry_init(node, provider, _clkctrl_add_provider);

	return;

cleanup:
	kfree(hw);
	kfree(init.name);
	kfree(clkctrl_clk);
}
CLK_OF_DECLARE(ti_omap4_clkctrl_clock, "ti,clkctrl",
	       _ti_omap4_clkctrl_setup);

/**
 * ti_clk_is_in_standby - Check if clkctrl clock is in standby or not
 * @clk: clock to check standby status for
 *
 * Finds whether the provided clock is in standby mode or not. Returns
 * true if the provided clock is a clkctrl type clock and it is in standby,
 * false otherwise.
 */
bool ti_clk_is_in_standby(struct clk *clk)
{
	struct clk_hw *hw;
	struct clk_hw_omap *hwclk;
	u32 val;

	hw = __clk_get_hw(clk);

	if (!omap2_clk_is_hw_omap(hw))
		return false;

	hwclk = to_clk_hw_omap(hw);

	val = ti_clk_ll_ops->clk_readl(&hwclk->enable_reg);

	if (val & OMAP4_STBYST_MASK)
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(ti_clk_is_in_standby);