// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks
 *
 * Copyright (c) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/pm.h>
#include <linux/pm_clock.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/of_clk.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#ifdef CONFIG_PM_CLK

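/*
 * A PM clock entry's status tracks how far the clk API calls for its clock
 * have progressed: acquired with clk_get(), prepared with clk_prepare(),
 * enabled with clk_enable(). PCE_STATUS_ERROR marks an entry whose clock
 * could not be acquired or prepared.
 */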
enum pce_status {
	PCE_STATUS_NONE = 0,
	PCE_STATUS_ACQUIRED,
	PCE_STATUS_PREPARED,
	PCE_STATUS_ENABLED,
	PCE_STATUS_ERROR,
};

struct pm_clock_entry {
	struct list_head node;
	char *con_id;
	struct clk *clk;
	enum pce_status status;
	bool enabled_when_prepared;
};

38
/**
 * pm_clk_list_lock - ensure exclusive access for modifying the PM clock
 *		      entry list.
 * @psd: pm_subsys_data instance corresponding to the PM clock entry list
 *	 and clock_op_might_sleep count to be modified.
 *
 * Get exclusive access before modifying the PM clock entry list and the
 * clock_op_might_sleep count to guard against concurrent modifications.
 * This also protects against a concurrent clock_op_might_sleep and PM clock
 * entry list usage in pm_clk_suspend()/pm_clk_resume() that may or may not
 * happen in atomic context, hence both the mutex and the spinlock must be
 * taken here.
 */
static void pm_clk_list_lock(struct pm_subsys_data *psd)
	__acquires(&psd->lock)
{
	mutex_lock(&psd->clock_mutex);
	spin_lock_irq(&psd->lock);
}

/**
 * pm_clk_list_unlock - counterpart to pm_clk_list_lock().
 * @psd: the same pm_subsys_data instance previously passed to
 *	 pm_clk_list_lock().
 */
static void pm_clk_list_unlock(struct pm_subsys_data *psd)
	__releases(&psd->lock)
{
	spin_unlock_irq(&psd->lock);
	mutex_unlock(&psd->clock_mutex);
}

/**
 * pm_clk_op_lock - ensure exclusive access for performing clock operations.
 * @psd: pm_subsys_data instance corresponding to the PM clock entry list
 *	 and clock_op_might_sleep count being used.
 * @flags: stored irq flags.
 * @fn: string for the caller function's name.
 *
 * This is used by pm_clk_suspend() and pm_clk_resume() to guard
 * against concurrent modifications to the clock entry list and the
 * clock_op_might_sleep count. If clock_op_might_sleep is != 0 then
 * only the mutex can be locked and those functions can only be used in
 * non-atomic context. If clock_op_might_sleep == 0 then these functions
 * may be used in any context and only the spinlock can be locked.
 * Returns -EPERM if called in atomic context when clock ops might sleep.
 */
static int pm_clk_op_lock(struct pm_subsys_data *psd, unsigned long *flags,
			  const char *fn)
	/* sparse annotations don't work here as exit state isn't static */
{
	bool atomic_context = in_atomic() || irqs_disabled();

try_again:
	spin_lock_irqsave(&psd->lock, *flags);
	if (!psd->clock_op_might_sleep) {
		/* the __release is there to work around sparse limitations */
		__release(&psd->lock);
		return 0;
	}

	/* bail out if in atomic context */
	if (atomic_context) {
		pr_err("%s: atomic context with clock_op_might_sleep = %d\n",
		       fn, psd->clock_op_might_sleep);
		spin_unlock_irqrestore(&psd->lock, *flags);
		might_sleep();
		return -EPERM;
	}

	/* we must switch to the mutex */
	spin_unlock_irqrestore(&psd->lock, *flags);
	mutex_lock(&psd->clock_mutex);

	/*
	 * There was a possibility for psd->clock_op_might_sleep
	 * to become 0 above. Keep the mutex only if not the case.
	 */
	if (likely(psd->clock_op_might_sleep))
		return 0;

	mutex_unlock(&psd->clock_mutex);
	goto try_again;
}

/**
 * pm_clk_op_unlock - counterpart to pm_clk_op_lock().
 * @psd: the same pm_subsys_data instance previously passed to
 *	 pm_clk_op_lock().
 * @flags: irq flags provided by pm_clk_op_lock().
 */
static void pm_clk_op_unlock(struct pm_subsys_data *psd, unsigned long *flags)
	/* sparse annotations don't work here as entry state isn't static */
{
	if (psd->clock_op_might_sleep) {
		mutex_unlock(&psd->clock_mutex);
	} else {
		/* the __acquire is there to work around sparse limitations */
		__acquire(&psd->lock);
		spin_unlock_irqrestore(&psd->lock, *flags);
	}
}

/**
 * __pm_clk_enable - Enable a clock, reporting any errors.
 * @dev: The device for the given clock.
 * @ce: PM clock entry corresponding to the clock.
 */
static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
{
	int ret;

	switch (ce->status) {
	case PCE_STATUS_ACQUIRED:
		ret = clk_prepare_enable(ce->clk);
		break;
	case PCE_STATUS_PREPARED:
		ret = clk_enable(ce->clk);
		break;
	default:
		return;
	}
	if (!ret)
		ce->status = PCE_STATUS_ENABLED;
	else
		dev_err(dev, "%s: failed to enable clk %p, error %d\n",
			__func__, ce->clk, ret);
}

/**
 * pm_clk_acquire - Acquire a device clock.
 * @dev: Device whose clock is to be acquired.
 * @ce: PM clock entry corresponding to the clock.
 */
static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
{
	if (!ce->clk)
		ce->clk = clk_get(dev, ce->con_id);
	if (IS_ERR(ce->clk)) {
		ce->status = PCE_STATUS_ERROR;
		return;
	} else if (clk_is_enabled_when_prepared(ce->clk)) {
		/* we defer preparing the clock in that case */
		ce->status = PCE_STATUS_ACQUIRED;
		ce->enabled_when_prepared = true;
	} else if (clk_prepare(ce->clk)) {
		ce->status = PCE_STATUS_ERROR;
		dev_err(dev, "clk_prepare() failed\n");
		return;
	} else {
		ce->status = PCE_STATUS_PREPARED;
	}
	dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
		ce->clk, ce->con_id);
}

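/**
 * __pm_clk_add - Add a clock to a device's PM clock list.
 * @dev: Device to add the clock entry for.
 * @con_id: Connection ID of the clock, or NULL if @clk is used directly.
 * @clk: Clock pointer used when @con_id is NULL.
 *
 * Allocate a PM clock entry, acquire the clock and append the entry to the
 * device's PM clock list.
 */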
static int __pm_clk_add(struct device *dev, const char *con_id,
			struct clk *clk)
{
	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce;

	if (!psd)
		return -EINVAL;

	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
	if (!ce)
		return -ENOMEM;

	if (con_id) {
		ce->con_id = kstrdup(con_id, GFP_KERNEL);
		if (!ce->con_id) {
			kfree(ce);
			return -ENOMEM;
		}
	} else {
		if (IS_ERR(clk)) {
			kfree(ce);
			return -ENOENT;
		}
		ce->clk = clk;
	}

	pm_clk_acquire(dev, ce);

	pm_clk_list_lock(psd);
	list_add_tail(&ce->node, &psd->clock_list);
	if (ce->enabled_when_prepared)
		psd->clock_op_might_sleep++;
	pm_clk_list_unlock(psd);
	return 0;
}

/**
 * pm_clk_add - Start using a device clock for power management.
 * @dev: Device whose clock is going to be used for power management.
 * @con_id: Connection ID of the clock.
 *
 * Add the clock represented by @con_id to the list of clocks used for
 * the power management of @dev.
 */
int pm_clk_add(struct device *dev, const char *con_id)
{
	return __pm_clk_add(dev, con_id, NULL);
}
EXPORT_SYMBOL_GPL(pm_clk_add);

/**
 * pm_clk_add_clk - Start using a device clock for power management.
 * @dev: Device whose clock is going to be used for power management.
 * @clk: Clock pointer
 *
 * Add the clock to the list of clocks used for the power management of @dev.
 * The power-management code will take control of the clock reference, so
 * callers should not call clk_put() on @clk after this function successfully
 * returns.
 */
int pm_clk_add_clk(struct device *dev, struct clk *clk)
{
	return __pm_clk_add(dev, NULL, clk);
}
EXPORT_SYMBOL_GPL(pm_clk_add_clk);

/**
 * of_pm_clk_add_clk - Start using a device clock for power management.
 * @dev: Device whose clock is going to be used for power management.
 * @name: Name of clock that is going to be used for power management.
 *
 * Add the clock described in the 'clocks' device-tree node that matches
 * the 'name' provided, to the list of clocks used for the power
 * management of @dev. On success, returns 0. Returns a negative error
 * code if the clock is not found or cannot be added.
 */
int of_pm_clk_add_clk(struct device *dev, const char *name)
{
	struct clk *clk;
	int ret;

	if (!dev || !dev->of_node || !name)
		return -EINVAL;

	clk = of_clk_get_by_name(dev->of_node, name);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = pm_clk_add_clk(dev, clk);
	if (ret) {
		clk_put(clk);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);

/**
 * of_pm_clk_add_clks - Start using device clock(s) for power management.
 * @dev: Device whose clock(s) are going to be used for power management.
 *
 * Add a series of clocks described in the 'clocks' device-tree node for
 * a device to the list of clocks used for the power management of @dev.
 * On success, returns the number of clocks added. Returns a negative
 * error code if there are no clocks in the device node for the device
 * or if adding a clock fails.
 */
int of_pm_clk_add_clks(struct device *dev)
{
	struct clk **clks;
	int i, count;
	int ret;

	if (!dev || !dev->of_node)
		return -EINVAL;

	count = of_clk_get_parent_count(dev->of_node);
	if (count <= 0)
		return -ENODEV;

	clks = kcalloc(count, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		clks[i] = of_clk_get(dev->of_node, i);
		if (IS_ERR(clks[i])) {
			ret = PTR_ERR(clks[i]);
			goto error;
		}

		ret = pm_clk_add_clk(dev, clks[i]);
		if (ret) {
			clk_put(clks[i]);
			goto error;
		}
	}

	kfree(clks);

	return i;

error:
	while (i--)
		pm_clk_remove_clk(dev, clks[i]);

	kfree(clks);

	return ret;
}
EXPORT_SYMBOL_GPL(of_pm_clk_add_clks);

/**
 * __pm_clk_remove - Destroy PM clock entry.
 * @ce: PM clock entry to destroy.
 */
static void __pm_clk_remove(struct pm_clock_entry *ce)
{
	if (!ce)
		return;

	switch (ce->status) {
	case PCE_STATUS_ENABLED:
		clk_disable(ce->clk);
		fallthrough;
	case PCE_STATUS_PREPARED:
		clk_unprepare(ce->clk);
		fallthrough;
	case PCE_STATUS_ACQUIRED:
	case PCE_STATUS_ERROR:
		if (!IS_ERR(ce->clk))
			clk_put(ce->clk);
		break;
	default:
		break;
	}

	kfree(ce->con_id);
	kfree(ce);
}

/**
 * pm_clk_remove - Stop using a device clock for power management.
 * @dev: Device whose clock should not be used for PM any more.
 * @con_id: Connection ID of the clock.
 *
 * Remove the clock represented by @con_id from the list of clocks used for
 * the power management of @dev.
 */
void pm_clk_remove(struct device *dev, const char *con_id)
{
	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce;

	if (!psd)
		return;

	pm_clk_list_lock(psd);

	list_for_each_entry(ce, &psd->clock_list, node) {
		if (!con_id && !ce->con_id)
			goto remove;
		else if (!con_id || !ce->con_id)
			continue;
		else if (!strcmp(con_id, ce->con_id))
			goto remove;
	}

	pm_clk_list_unlock(psd);
	return;

 remove:
	list_del(&ce->node);
	if (ce->enabled_when_prepared)
		psd->clock_op_might_sleep--;
	pm_clk_list_unlock(psd);

	__pm_clk_remove(ce);
}
EXPORT_SYMBOL_GPL(pm_clk_remove);

/**
 * pm_clk_remove_clk - Stop using a device clock for power management.
 * @dev: Device whose clock should not be used for PM any more.
 * @clk: Clock pointer
 *
 * Remove the clock pointed to by @clk from the list of clocks used for
 * the power management of @dev.
 */
void pm_clk_remove_clk(struct device *dev, struct clk *clk)
{
	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce;

	if (!psd || !clk)
		return;

	pm_clk_list_lock(psd);

	list_for_each_entry(ce, &psd->clock_list, node) {
		if (clk == ce->clk)
			goto remove;
	}

	pm_clk_list_unlock(psd);
	return;

 remove:
	list_del(&ce->node);
	if (ce->enabled_when_prepared)
		psd->clock_op_might_sleep--;
	pm_clk_list_unlock(psd);

	__pm_clk_remove(ce);
}
EXPORT_SYMBOL_GPL(pm_clk_remove_clk);

/**
 * pm_clk_init - Initialize a device's list of power management clocks.
 * @dev: Device to initialize the list of PM clocks for.
 *
 * Initialize the clock_list and clock_mutex members of the device's
 * pm_subsys_data object and set its clock_op_might_sleep count to 0.
 */
void pm_clk_init(struct device *dev)
{
	struct pm_subsys_data *psd = dev_to_psd(dev);

	if (psd) {
		INIT_LIST_HEAD(&psd->clock_list);
		mutex_init(&psd->clock_mutex);
		psd->clock_op_might_sleep = 0;
	}
}
EXPORT_SYMBOL_GPL(pm_clk_init);

/**
 * pm_clk_create - Create and initialize a device's list of PM clocks.
 * @dev: Device to create and initialize the list of PM clocks for.
 *
 * Allocate a struct pm_subsys_data object, initialize its lock and clock_list
 * members and make the @dev's power.subsys_data field point to it.
 */
int pm_clk_create(struct device *dev)
{
	return dev_pm_get_subsys_data(dev);
}
EXPORT_SYMBOL_GPL(pm_clk_create);

/**
 * pm_clk_destroy - Destroy a device's list of power management clocks.
 * @dev: Device to destroy the list of PM clocks for.
 *
 * Clear the @dev's power.subsys_data field, remove the list of clock entries
 * from the struct pm_subsys_data object it used to point to, and free that
 * object.
 */
void pm_clk_destroy(struct device *dev)
{
	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce, *c;
	struct list_head list;

	if (!psd)
		return;

	INIT_LIST_HEAD(&list);

	pm_clk_list_lock(psd);

	list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
		list_move(&ce->node, &list);
	psd->clock_op_might_sleep = 0;

	pm_clk_list_unlock(psd);

	dev_pm_put_subsys_data(dev);

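	/*
	 * The entries were moved to the local list above, so they can be
	 * released here without holding the PM clock list locks; the clk
	 * calls made by __pm_clk_remove() may sleep.
	 */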
	list_for_each_entry_safe_reverse(ce, c, &list, node) {
		list_del(&ce->node);
		__pm_clk_remove(ce);
	}
}
EXPORT_SYMBOL_GPL(pm_clk_destroy);

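/*
 * devres action callback used by devm_pm_clk_create() to destroy the PM
 * clock list when the device's resources are released.
 */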
static void pm_clk_destroy_action(void *data)
{
	pm_clk_destroy(data);
}

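/**
 * devm_pm_clk_create - Managed counterpart of pm_clk_create().
 * @dev: Device to create and initialize the list of PM clocks for.
 *
 * Create the device's list of PM clocks and register a devres action that
 * destroys it again when the device's resources are released.
 */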
int devm_pm_clk_create(struct device *dev)
{
	int ret;

	ret = pm_clk_create(dev);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, pm_clk_destroy_action, dev);
}
EXPORT_SYMBOL_GPL(devm_pm_clk_create);

/**
 * pm_clk_suspend - Disable clocks in a device's PM clock list.
 * @dev: Device to disable the clocks for.
 */
int pm_clk_suspend(struct device *dev)
{
	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce;
	unsigned long flags;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	if (!psd)
		return 0;

	ret = pm_clk_op_lock(psd, &flags, __func__);
	if (ret)
		return ret;

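	/* Disable the clocks in the reverse order of their registration. */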
	list_for_each_entry_reverse(ce, &psd->clock_list, node) {
		if (ce->status == PCE_STATUS_ENABLED) {
			if (ce->enabled_when_prepared) {
				clk_disable_unprepare(ce->clk);
				ce->status = PCE_STATUS_ACQUIRED;
			} else {
				clk_disable(ce->clk);
				ce->status = PCE_STATUS_PREPARED;
			}
		}
	}

	pm_clk_op_unlock(psd, &flags);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_clk_suspend);

/**
 * pm_clk_resume - Enable clocks in a device's PM clock list.
 * @dev: Device to enable the clocks for.
 */
int pm_clk_resume(struct device *dev)
{
	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce;
	unsigned long flags;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	if (!psd)
		return 0;

	ret = pm_clk_op_lock(psd, &flags, __func__);
	if (ret)
		return ret;

	list_for_each_entry(ce, &psd->clock_list, node)
		__pm_clk_enable(dev, ce);

	pm_clk_op_unlock(psd, &flags);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_clk_resume);

/**
 * pm_clk_notify - Notify routine for device addition and removal.
 * @nb: Notifier block object this function is a member of.
 * @action: Operation being carried out by the caller.
 * @data: Device the routine is being run for.
 *
 * For this function to work, @nb must be a member of an object of type
 * struct pm_clk_notifier_block containing all of the requisite data.
 * Specifically, the pm_domain member of that object is copied to the device's
 * pm_domain field and its con_ids member is used to populate the device's list
 * of PM clocks, depending on @action.
 *
 * If the device's pm_domain field is already populated with a value different
 * from the one stored in the struct pm_clk_notifier_block object, the function
 * does nothing.
 */
static int pm_clk_notify(struct notifier_block *nb,
			 unsigned long action, void *data)
{
	struct pm_clk_notifier_block *clknb;
	struct device *dev = data;
	char **con_id;
	int error;

	dev_dbg(dev, "%s() %ld\n", __func__, action);

	clknb = container_of(nb, struct pm_clk_notifier_block, nb);

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		if (dev->pm_domain)
			break;

		error = pm_clk_create(dev);
		if (error)
			break;

		dev_pm_domain_set(dev, clknb->pm_domain);
		if (clknb->con_ids[0]) {
			for (con_id = clknb->con_ids; *con_id; con_id++)
				pm_clk_add(dev, *con_id);
		} else {
			pm_clk_add(dev, NULL);
		}

		break;
	case BUS_NOTIFY_DEL_DEVICE:
		if (dev->pm_domain != clknb->pm_domain)
			break;

		dev_pm_domain_set(dev, NULL);
		pm_clk_destroy(dev);
		break;
	}

	return 0;
}

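/**
 * pm_clk_runtime_suspend - Runtime suspend helper for PM clock users.
 * @dev: Device to suspend.
 *
 * Run the device's generic runtime suspend callback and, if that succeeds,
 * disable the clocks in its PM clock list. If disabling the clocks fails,
 * the device is resumed again and the error is returned.
 */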
int pm_clk_runtime_suspend(struct device *dev)
{
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	ret = pm_generic_runtime_suspend(dev);
	if (ret) {
		dev_err(dev, "failed to suspend device\n");
		return ret;
	}

	ret = pm_clk_suspend(dev);
	if (ret) {
		dev_err(dev, "failed to suspend clock\n");
		pm_generic_runtime_resume(dev);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pm_clk_runtime_suspend);

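/**
 * pm_clk_runtime_resume - Runtime resume helper for PM clock users.
 * @dev: Device to resume.
 *
 * Enable the clocks in the device's PM clock list and then run its generic
 * runtime resume callback.
 */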
int pm_clk_runtime_resume(struct device *dev)
{
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	ret = pm_clk_resume(dev);
	if (ret) {
		dev_err(dev, "failed to resume clock\n");
		return ret;
	}

	return pm_generic_runtime_resume(dev);
}
EXPORT_SYMBOL_GPL(pm_clk_runtime_resume);

#else /* !CONFIG_PM_CLK */

/**
 * enable_clock - Enable a device clock.
 * @dev: Device whose clock is to be enabled.
 * @con_id: Connection ID of the clock.
 */
static void enable_clock(struct device *dev, const char *con_id)
{
	struct clk *clk;

	clk = clk_get(dev, con_id);
	if (!IS_ERR(clk)) {
		clk_prepare_enable(clk);
		clk_put(clk);
		dev_info(dev, "Runtime PM disabled, clock forced on.\n");
	}
}

/**
 * disable_clock - Disable a device clock.
 * @dev: Device whose clock is to be disabled.
 * @con_id: Connection ID of the clock.
 */
static void disable_clock(struct device *dev, const char *con_id)
{
	struct clk *clk;

	clk = clk_get(dev, con_id);
	if (!IS_ERR(clk)) {
		clk_disable_unprepare(clk);
		clk_put(clk);
		dev_info(dev, "Runtime PM disabled, clock forced off.\n");
	}
}

/**
 * pm_clk_notify - Notify routine for device addition and removal.
 * @nb: Notifier block object this function is a member of.
 * @action: Operation being carried out by the caller.
 * @data: Device the routine is being run for.
 *
 * For this function to work, @nb must be a member of an object of type
 * struct pm_clk_notifier_block containing all of the requisite data.
 * Specifically, the con_ids member of that object is used to enable or disable
 * the device's clocks, depending on @action.
 */
static int pm_clk_notify(struct notifier_block *nb,
			 unsigned long action, void *data)
{
	struct pm_clk_notifier_block *clknb;
	struct device *dev = data;
	char **con_id;

	dev_dbg(dev, "%s() %ld\n", __func__, action);

	clknb = container_of(nb, struct pm_clk_notifier_block, nb);

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		if (clknb->con_ids[0]) {
			for (con_id = clknb->con_ids; *con_id; con_id++)
				enable_clock(dev, *con_id);
		} else {
			enable_clock(dev, NULL);
		}
		break;
	case BUS_NOTIFY_DRIVER_NOT_BOUND:
	case BUS_NOTIFY_UNBOUND_DRIVER:
		if (clknb->con_ids[0]) {
			for (con_id = clknb->con_ids; *con_id; con_id++)
				disable_clock(dev, *con_id);
		} else {
			disable_clock(dev, NULL);
		}
		break;
	}

	return 0;
}

#endif /* !CONFIG_PM_CLK */

/**
 * pm_clk_add_notifier - Add bus type notifier for power management clocks.
 * @bus: Bus type to add the notifier to.
 * @clknb: Notifier to be added to the given bus type.
 *
 * The nb member of @clknb is not expected to be initialized and its
 * notifier_call member will be replaced with pm_clk_notify(). However,
 * the remaining members of @clknb should be populated prior to calling this
 * routine.
 */
void pm_clk_add_notifier(struct bus_type *bus,
			 struct pm_clk_notifier_block *clknb)
{
	if (!bus || !clknb)
		return;

	clknb->nb.notifier_call = pm_clk_notify;
	bus_register_notifier(bus, &clknb->nb);
}
EXPORT_SYMBOL_GPL(pm_clk_add_notifier);