// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#include "opp.h"

/*
 * The root of the list of all opp-tables. All opp_table structures branch off
 * from here, with each opp_table containing the list of opps it supports in
 * various states of availability.
 */
LIST_HEAD(opp_tables);

/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);
/* Flag indicating that opp_tables list is being updated at the moment */
static bool opp_tables_busy;

/* OPP ID allocator */
static DEFINE_XARRAY_ALLOC1(opp_configs);
static bool _find_opp_dev(const struct device *dev, struct opp_table *opp_table)
{
	struct opp_device *opp_dev;
	bool found = false;

	mutex_lock(&opp_table->lock);
	list_for_each_entry(opp_dev, &opp_table->dev_list, node)
		if (opp_dev->dev == dev) {
			found = true;
			break;
		}

	mutex_unlock(&opp_table->lock);
	return found;
}

static struct opp_table *_find_opp_table_unlocked(struct device *dev)
{
	struct opp_table *opp_table;

	list_for_each_entry(opp_table, &opp_tables, node) {
		if (_find_opp_dev(dev, opp_table)) {
			_get_opp_table_kref(opp_table);
			return opp_table;
		}
	}

	return ERR_PTR(-ENODEV);
}

/**
 * _find_opp_table() - find opp_table struct using device pointer
 * @dev: device pointer used to lookup OPP table
 *
 * Search OPP table for one containing matching device.
 *
 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * The callers must call dev_pm_opp_put_opp_table() after the table is used.
 */
struct opp_table *_find_opp_table(struct device *dev)
{
	struct opp_table *opp_table;

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&opp_table_lock);
	opp_table = _find_opp_table_unlocked(dev);
	mutex_unlock(&opp_table_lock);

	return opp_table;
}

/*
 * Returns true if the table has no more than one clock, else returns false
 * with a WARN.
 *
 * We don't force clk_count == 1 here as there are users who don't have a clock
 * representation in the OPP table and manage the clock configuration themselves
 * in a platform-specific way.
 */
static bool assert_single_clk(struct opp_table *opp_table,
			      unsigned int __always_unused index)
{
	return !WARN_ON(opp_table->clk_count > 1);
}

/*
 * Returns true if the clock table is large enough to contain the clock index.
 */
static bool assert_clk_index(struct opp_table *opp_table,
			     unsigned int index)
{
	return opp_table->clk_count > index;
}

/*
 * Returns true if the bandwidth table is large enough to contain the
 * bandwidth index.
 */
static bool assert_bandwidth_index(struct opp_table *opp_table,
				   unsigned int index)
{
	return opp_table->path_count > index;
}

/**
 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
 * @opp: opp for which the voltage has to be returned
 *
 * Return: voltage in micro volt corresponding to the opp, else
 * return 0
 *
 * This is useful only for devices with a single power supply.
 */
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->supplies[0].u_volt;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
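
/*
 * Example (illustrative sketch only; assumes the caller already holds a valid
 * OPP reference obtained from one of the dev_pm_opp_find_*() helpers, and
 * that "dev", "freq" and "u_volt" are driver-local variables):
 *
 *	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
 *	if (!IS_ERR(opp)) {
 *		u_volt = dev_pm_opp_get_voltage(opp);
 *		dev_pm_opp_put(opp);
 *	}
 */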

/**
 * dev_pm_opp_get_supplies() - Gets the supply information corresponding to an opp
 * @opp: opp for which the supply information has to be returned
 * @supplies: Placeholder for copying the supply information.
 *
 * Return: negative error number on failure, 0 otherwise on success after
 * setting @supplies.
 *
 * This can be used for devices with any number of power supplies. The caller
 * must ensure that the @supplies array contains space for each regulator.
 */
int dev_pm_opp_get_supplies(struct dev_pm_opp *opp,
			    struct dev_pm_opp_supply *supplies)
{
	if (IS_ERR_OR_NULL(opp) || !supplies) {
		pr_err("%s: Invalid parameters\n", __func__);
		return -EINVAL;
	}

	memcpy(supplies, opp->supplies,
	       sizeof(*supplies) * opp->opp_table->regulator_count);
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_supplies);
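
/*
 * Example (illustrative sketch only; NUM_SUPPLIES is a hypothetical
 * driver-local constant that must match the number of regulators the OPP
 * table was configured with):
 *
 *	struct dev_pm_opp_supply supplies[NUM_SUPPLIES];
 *
 *	if (!dev_pm_opp_get_supplies(opp, supplies))
 *		dev_dbg(dev, "first supply: %lu uV\n", supplies[0].u_volt);
 */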

/**
 * dev_pm_opp_get_power() - Gets the power corresponding to an opp
 * @opp: opp for which the power has to be returned
 *
 * Return: power in micro watt corresponding to the opp, else
 * return 0
 *
 * This is useful only for devices with a single power supply.
 */
unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp)
{
	unsigned long opp_power = 0;
	int i;

	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	for (i = 0; i < opp->opp_table->regulator_count; i++)
		opp_power += opp->supplies[i].u_watt;

	return opp_power;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_power);

/**
 * dev_pm_opp_get_freq_indexed() - Gets the frequency corresponding to an
 *				   available opp with specified index
 * @opp: opp for which the frequency has to be returned
 * @index: index of the frequency within the required opp
 *
 * Return: frequency in hertz corresponding to the opp with the specified
 * index, else return 0
 */
unsigned long dev_pm_opp_get_freq_indexed(struct dev_pm_opp *opp, u32 index)
{
	if (IS_ERR_OR_NULL(opp) || index >= opp->opp_table->clk_count) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->rates[index];
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq_indexed);

/**
 * dev_pm_opp_get_level() - Gets the level corresponding to an available opp
 * @opp: opp for which the level value has to be returned
 *
 * Return: level read from device tree corresponding to the opp, else
 * return 0.
 */
unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->level;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);

/**
 * dev_pm_opp_get_required_pstate() - Gets the required performance state
 *				      corresponding to an available opp
 * @opp: opp for which the performance state has to be returned
 * @index: index of the required opp
 *
 * Return: performance state read from device tree corresponding to the
 * required opp, else return 0.
 */
unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
					    unsigned int index)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available ||
	    index >= opp->opp_table->required_opp_count) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	/* required-opps not fully initialized yet */
	if (lazy_linking_pending(opp->opp_table))
		return 0;

	/* The required OPP table must belong to a genpd */
	if (unlikely(!opp->opp_table->required_opp_tables[index]->is_genpd)) {
		pr_err("%s: Performance state is only valid for genpds.\n", __func__);
		return 0;
	}

	return opp->required_opps[index]->level;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_required_pstate);

/**
 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
 * @opp: opp for which turbo mode is being verified
 *
 * Turbo OPPs are not for normal use, and can be enabled (under certain
 * conditions) for short durations of time to finish high throughput work
 * quickly. Running on them for longer times may overheat the chip.
 *
 * Return: true if opp is turbo opp, else false.
 */
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return false;
	}

	return opp->turbo;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
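
/*
 * Example (illustrative sketch only): a governor picking a sustained
 * operating point could reject turbo OPPs after a ceil lookup.
 *
 *	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
 *	if (!IS_ERR(opp) && dev_pm_opp_is_turbo(opp)) {
 *		dev_pm_opp_put(opp);
 *		opp = ERR_PTR(-ERANGE);
 *	}
 */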

/**
 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max clock latency in nanoseconds.
 */
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
	struct opp_table *opp_table;
	unsigned long clock_latency_ns;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	clock_latency_ns = opp_table->clock_latency_ns_max;

	dev_pm_opp_put_opp_table(opp_table);

	return clock_latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);

/**
 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max voltage latency in nanoseconds.
 */
unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;
	struct regulator *reg;
	unsigned long latency_ns = 0;
	int ret, i, count;
	struct {
		unsigned long min;
		unsigned long max;
	} *uV;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	/* Regulator may not be required for the device */
	if (!opp_table->regulators)
		goto put_opp_table;

	count = opp_table->regulator_count;

	uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
	if (!uV)
		goto put_opp_table;

	mutex_lock(&opp_table->lock);

	for (i = 0; i < count; i++) {
		uV[i].min = ~0;
		uV[i].max = 0;

		list_for_each_entry(opp, &opp_table->opp_list, node) {
			if (!opp->available)
				continue;

			if (opp->supplies[i].u_volt_min < uV[i].min)
				uV[i].min = opp->supplies[i].u_volt_min;
			if (opp->supplies[i].u_volt_max > uV[i].max)
				uV[i].max = opp->supplies[i].u_volt_max;
		}
	}

	mutex_unlock(&opp_table->lock);

	/*
	 * The caller needs to ensure that opp_table (and hence the regulator)
	 * isn't freed, while we are executing this routine.
	 */
	for (i = 0; i < count; i++) {
		reg = opp_table->regulators[i];
		ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
		if (ret > 0)
			latency_ns += ret * 1000;
	}

	kfree(uV);
put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);

/**
 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
 *					     nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max transition latency, in nanoseconds, to
 * switch from one OPP to another.
 */
unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
{
	return dev_pm_opp_get_max_volt_latency(dev) +
		dev_pm_opp_get_max_clock_latency(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
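
/*
 * Example (illustrative sketch only): a governor might derive a polling
 * interval from the worst-case transition latency; the 10 ms floor here is
 * an arbitrary, hypothetical choice.
 *
 *	t_ns = dev_pm_opp_get_max_transition_latency(dev);
 *	polling_ms = max_t(unsigned long, DIV_ROUND_UP(t_ns, NSEC_PER_MSEC), 10);
 */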

/**
 * dev_pm_opp_get_suspend_opp_freq() - Get frequency of suspend opp in Hz
 * @dev: device for which we do this operation
 *
 * Return: This function returns the frequency of the OPP marked as suspend_opp
 * if one is available, else returns 0.
 */
unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
{
	struct opp_table *opp_table;
	unsigned long freq = 0;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	if (opp_table->suspend_opp && opp_table->suspend_opp->available)
		freq = dev_pm_opp_get_freq(opp_table->suspend_opp);

	dev_pm_opp_put_opp_table(opp_table);

	return freq;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);

int _get_opp_count(struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	int count = 0;

	mutex_lock(&opp_table->lock);

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		if (opp->available)
			count++;
	}

	mutex_unlock(&opp_table->lock);

	return count;
}

/**
 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
 * @dev: device for which we do this operation
 *
 * Return: This function returns the number of available opps if there are any,
 * else returns 0 if none or the corresponding error value.
 */
int dev_pm_opp_get_opp_count(struct device *dev)
{
	struct opp_table *opp_table;
	int count;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		count = PTR_ERR(opp_table);
		dev_dbg(dev, "%s: OPP table not found (%d)\n",
			__func__, count);
		return count;
	}

	count = _get_opp_count(opp_table);
	dev_pm_opp_put_opp_table(opp_table);

	return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
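
/*
 * Example (illustrative sketch only): a probe() routine can use the count to
 * decide whether the OPP table has been populated yet, deferring when it
 * hasn't shown up.
 *
 *	ret = dev_pm_opp_get_opp_count(dev);
 *	if (ret <= 0)
 *		return ret ? ret : -EPROBE_DEFER;
 */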

/* Helpers to read keys */
static unsigned long _read_freq(struct dev_pm_opp *opp, int index)
{
	return opp->rates[index];
}

static unsigned long _read_level(struct dev_pm_opp *opp, int index)
{
	return opp->level;
}

static unsigned long _read_bw(struct dev_pm_opp *opp, int index)
{
	return opp->bandwidth[index].peak;
}

/* Generic comparison helpers */
static bool _compare_exact(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
			   unsigned long opp_key, unsigned long key)
{
	if (opp_key == key) {
		*opp = temp_opp;
		return true;
	}

	return false;
}

static bool _compare_ceil(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
			  unsigned long opp_key, unsigned long key)
{
	if (opp_key >= key) {
		*opp = temp_opp;
		return true;
	}

	return false;
}

static bool _compare_floor(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
			   unsigned long opp_key, unsigned long key)
{
	if (opp_key > key)
		return true;

	*opp = temp_opp;
	return false;
}

/* Generic key finding helpers */
static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table,
		unsigned long *key, int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
				unsigned long opp_key, unsigned long key),
		bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	/* Assert that the requirement is met */
	if (assert && !assert(opp_table, index))
		return ERR_PTR(-EINVAL);

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available == available) {
			if (compare(&opp, temp_opp, read(temp_opp, index), *key))
				break;
		}
	}

	/* Increment the reference count of OPP */
	if (!IS_ERR(opp)) {
		*key = read(opp, index);
		dev_pm_opp_get(opp);
	}

	mutex_unlock(&opp_table->lock);

	return opp;
}

static struct dev_pm_opp *
_find_key(struct device *dev, unsigned long *key, int index, bool available,
	  unsigned long (*read)(struct dev_pm_opp *opp, int index),
	  bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
			  unsigned long opp_key, unsigned long key),
	  bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: OPP table not found (%ld)\n", __func__,
			PTR_ERR(opp_table));
		return ERR_CAST(opp_table);
	}

	opp = _opp_table_find_key(opp_table, key, index, available, read,
				  compare, assert);

	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}

static struct dev_pm_opp *_find_key_exact(struct device *dev,
		unsigned long key, int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
	/*
	 * The value of key will be updated here, but will be ignored as the
	 * caller doesn't need it.
	 */
	return _find_key(dev, &key, index, available, read, _compare_exact,
			 assert);
}

static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table,
		unsigned long *key, int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
	return _opp_table_find_key(opp_table, key, index, available, read,
				   _compare_ceil, assert);
}

static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key,
		int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
	return _find_key(dev, key, index, available, read, _compare_ceil,
			 assert);
}

static struct dev_pm_opp *_find_key_floor(struct device *dev,
		unsigned long *key, int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
	return _find_key(dev, key, index, available, read, _compare_floor,
			 assert);
}

/**
 * dev_pm_opp_find_freq_exact() - search for an exact frequency
 * @dev: device for which we do this operation
 * @freq: frequency to search for
 * @available: true/false - match for available opp
 *
 * Return: Searches for exact match in the opp table and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * Note: available is a modifier for the search. If available == true, then the
 * match is for an exact matching frequency which is available in the stored
 * OPP table. If false, the match is for an exact frequency which is not
 * available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
					      unsigned long freq, bool available)
{
	return _find_key_exact(dev, freq, 0, available, _read_freq,
			       assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
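
/*
 * Example (illustrative sketch only): look up the exact, currently available
 * OPP for a known rate before toggling its availability.
 *
 *	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
 *	if (!IS_ERR(opp)) {
 *		dev_pm_opp_put(opp);
 *		dev_pm_opp_disable(dev, freq);
 *	}
 */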

/**
 * dev_pm_opp_find_freq_exact_indexed() - Search for an exact freq for the
 *					  clock corresponding to the index
 * @dev: Device for which we do this operation
 * @freq: frequency to search for
 * @index: Clock index
 * @available: true/false - match for available opp
 *
 * Search for the matching exact OPP for the clock corresponding to the
 * specified index from a starting freq for a device.
 *
 * Return: matching *opp, else returns ERR_PTR in case of error and should be
 * handled using IS_ERR. Error return values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *
dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
				   u32 index, bool available)
{
	return _find_key_exact(dev, freq, index, available, _read_freq,
			       assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact_indexed);

static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
						   unsigned long *freq)
{
	return _opp_table_find_key_ceil(opp_table, freq, 0, true, _read_freq,
					assert_single_clk);
}

/**
 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev: device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
					     unsigned long *freq)
{
	return _find_key_ceil(dev, freq, 0, true, _read_freq, assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
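
/*
 * Example (illustrative sketch only; the classic cpufreq/devfreq lookup
 * pattern): map a requested rate to the nearest equal-or-higher OPP; on
 * success, *freq is updated to the OPP's actual rate.
 *
 *	freq = target_freq;
 *	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
 *	if (IS_ERR(opp))
 *		return PTR_ERR(opp);
 *	u_volt = dev_pm_opp_get_voltage(opp);
 *	dev_pm_opp_put(opp);
 */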

/**
 * dev_pm_opp_find_freq_ceil_indexed() - Search for a rounded ceil freq for the
 *					 clock corresponding to the index
 * @dev: Device for which we do this operation
 * @freq: Start frequency
 * @index: Clock index
 *
 * Search for the matching ceil *available* OPP for the clock corresponding to
 * the specified index from a starting freq for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *
dev_pm_opp_find_freq_ceil_indexed(struct device *dev, unsigned long *freq,
				  u32 index)
{
	return _find_key_ceil(dev, freq, index, true, _read_freq,
			      assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_indexed);

/**
 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
 * @dev: device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
					      unsigned long *freq)
{
	return _find_key_floor(dev, freq, 0, true, _read_freq, assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
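
/*
 * Example (illustrative sketch only): find the fastest available OPP by
 * taking the floor of ULONG_MAX.
 *
 *	freq = ULONG_MAX;
 *	opp = dev_pm_opp_find_freq_floor(dev, &freq);
 *	if (!IS_ERR(opp))
 *		dev_pm_opp_put(opp);
 */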

/**
 * dev_pm_opp_find_freq_floor_indexed() - Search for a rounded floor freq for the
 *					  clock corresponding to the index
 * @dev: Device for which we do this operation
 * @freq: Start frequency
 * @index: Clock index
 *
 * Search for the matching floor *available* OPP for the clock corresponding to
 * the specified index from a starting freq for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *
dev_pm_opp_find_freq_floor_indexed(struct device *dev, unsigned long *freq,
				   u32 index)
{
	return _find_key_floor(dev, freq, index, true, _read_freq, assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor_indexed);

/**
 * dev_pm_opp_find_level_exact() - search for an exact level
 * @dev: device for which we do this operation
 * @level: level to search for
 *
 * Return: Searches for exact match in the opp table and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
					       unsigned int level)
{
	return _find_key_exact(dev, level, 0, true, _read_level, NULL);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);

/**
 * dev_pm_opp_find_level_ceil() - search for a rounded-up level
 * @dev: device for which we do this operation
 * @level: level to search for
 *
 * Return: Searches for a rounded-up match in the opp table and returns pointer
 * to the matching opp if found, else returns ERR_PTR in case of error and
 * should be handled using IS_ERR. Error return values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
					      unsigned int *level)
{
	unsigned long temp = *level;
	struct dev_pm_opp *opp;

	opp = _find_key_ceil(dev, &temp, 0, true, _read_level, NULL);
	*level = temp;
	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);

/**
 * dev_pm_opp_find_bw_ceil() - Search for a rounded ceil bandwidth
 * @dev: device for which we do this operation
 * @bw: start bandwidth
 * @index: which bandwidth to compare, in case of OPPs with several values
 *
 * Search for the matching ceil *available* OPP from a starting bandwidth
 * for a device.
 *
 * Return: matching *opp and refreshes *bw accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw,
					   int index)
{
	unsigned long temp = *bw;
	struct dev_pm_opp *opp;

	opp = _find_key_ceil(dev, &temp, index, true, _read_bw,
			     assert_bandwidth_index);
	*bw = temp;
	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil);
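
/*
 * Example (illustrative sketch only; assumes the OPP table carries
 * interconnect bandwidth values and "requested_peak_bw" is a hypothetical
 * driver variable): pick the OPP whose peak bandwidth on path 0 is at least
 * the requested amount.
 *
 *	bw = requested_peak_bw;
 *	opp = dev_pm_opp_find_bw_ceil(dev, &bw, 0);
 *	if (!IS_ERR(opp))
 *		dev_pm_opp_put(opp);
 */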

/**
 * dev_pm_opp_find_bw_floor() - Search for a rounded floor bandwidth
 * @dev: device for which we do this operation
 * @bw: start bandwidth
 * @index: which bandwidth to compare, in case of OPPs with several values
 *
 * Search for the matching floor *available* OPP from a starting bandwidth
 * for a device.
 *
 * Return: matching *opp and refreshes *bw accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
					    unsigned int *bw, int index)
{
	unsigned long temp = *bw;
	struct dev_pm_opp *opp;

	opp = _find_key_floor(dev, &temp, index, true, _read_bw,
			      assert_bandwidth_index);
	*bw = temp;
	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_floor);

static int _set_opp_voltage(struct device *dev, struct regulator *reg,
			    struct dev_pm_opp_supply *supply)
{
	int ret;

	/* Regulator not available for device */
	if (IS_ERR(reg)) {
		dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
			PTR_ERR(reg));
		return 0;
	}

	dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__,
		supply->u_volt_min, supply->u_volt, supply->u_volt_max);

	ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
					    supply->u_volt, supply->u_volt_max);
	if (ret)
		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
			__func__, supply->u_volt_min, supply->u_volt,
			supply->u_volt_max, ret);

	return ret;
}

static int
_opp_config_clk_single(struct device *dev, struct opp_table *opp_table,
		       struct dev_pm_opp *opp, void *data, bool scaling_down)
{
	unsigned long *target = data;
	unsigned long freq;
	int ret;

	/* One of target and opp must be available */
	if (target) {
		freq = *target;
	} else if (opp) {
		freq = opp->rates[0];
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	ret = clk_set_rate(opp_table->clk, freq);
	if (ret) {
		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
			ret);
	} else {
		opp_table->rate_clk_single = freq;
	}

	return ret;
}

/*
 * Simple implementation for configuring multiple clocks. Configure clocks in
 * the order in which they are present in the array while scaling up, and in
 * reverse order while scaling down.
 */
int dev_pm_opp_config_clks_simple(struct device *dev,
		struct opp_table *opp_table, struct dev_pm_opp *opp, void *data,
		bool scaling_down)
{
	int ret, i;

	if (scaling_down) {
		for (i = opp_table->clk_count - 1; i >= 0; i--) {
			ret = clk_set_rate(opp_table->clks[i], opp->rates[i]);
			if (ret) {
				dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
					ret);
				return ret;
			}
		}
	} else {
		for (i = 0; i < opp_table->clk_count; i++) {
			ret = clk_set_rate(opp_table->clks[i], opp->rates[i]);
			if (ret) {
				dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
					ret);
				return ret;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_config_clks_simple);
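
/*
 * Example (illustrative sketch only; "clk_names" is a hypothetical
 * driver-provided NULL-terminated array): platforms happy with this in-order
 * behaviour can install the helper through the generic config interface.
 *
 *	struct dev_pm_opp_config config = {
 *		.clk_names = clk_names,
 *		.config_clks = dev_pm_opp_config_clks_simple,
 *	};
 *
 *	ret = devm_pm_opp_set_config(dev, &config);
 */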

static int _opp_config_regulator_single(struct device *dev,
		struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp,
		struct regulator **regulators, unsigned int count)
{
	struct regulator *reg = regulators[0];
	int ret;

	/* This function only supports single regulator per device */
	if (WARN_ON(count > 1)) {
		dev_err(dev, "multiple regulators are not supported\n");
		return -EINVAL;
	}

	ret = _set_opp_voltage(dev, reg, new_opp->supplies);
	if (ret)
		return ret;

	/*
	 * Enable the regulator after setting its voltages, otherwise it breaks
	 * some boot-enabled regulators.
	 */
	if (unlikely(!new_opp->opp_table->enabled)) {
		ret = regulator_enable(reg);
		if (ret < 0)
			dev_warn(dev, "Failed to enable regulator: %d", ret);
	}

	return 0;
}

static int _set_opp_bw(const struct opp_table *opp_table,
		       struct dev_pm_opp *opp, struct device *dev)
{
	u32 avg, peak;
	int i, ret;

	if (!opp_table->paths)
		return 0;

	for (i = 0; i < opp_table->path_count; i++) {
		if (!opp) {
			avg = 0;
			peak = 0;
		} else {
			avg = opp->bandwidth[i].avg;
			peak = opp->bandwidth[i].peak;
		}
		ret = icc_set_bw(opp_table->paths[i], avg, peak);
		if (ret) {
			dev_err(dev, "Failed to %s bandwidth[%d]: %d\n",
				opp ? "set" : "remove", i, ret);
			return ret;
		}
	}

	return 0;
}

static int _set_performance_state(struct device *dev, struct device *pd_dev,
				  struct dev_pm_opp *opp, int i)
{
	unsigned int pstate = likely(opp) ? opp->required_opps[i]->level : 0;
	int ret;

	if (!pd_dev)
		return 0;

	ret = dev_pm_genpd_set_performance_state(pd_dev, pstate);
	if (ret) {
		dev_err(dev, "Failed to set performance state of %s: %d (%d)\n",
			dev_name(pd_dev), pstate, ret);
	}

	return ret;
}

static int _opp_set_required_opps_generic(struct device *dev,
		struct opp_table *opp_table, struct dev_pm_opp *opp, bool scaling_down)
{
	dev_err(dev, "setting required-opps isn't supported for non-genpd devices\n");
	return -ENOENT;
}

static int _opp_set_required_opps_genpd(struct device *dev,
		struct opp_table *opp_table, struct dev_pm_opp *opp, bool scaling_down)
{
	struct device **genpd_virt_devs =
		opp_table->genpd_virt_devs ? opp_table->genpd_virt_devs : &dev;
	int i, ret = 0;

	/*
	 * Acquire genpd_virt_dev_lock to make sure we don't use a genpd_dev
	 * after it is freed from another thread.
	 */
	mutex_lock(&opp_table->genpd_virt_dev_lock);

	/* Scaling up? Set required OPPs in normal order, else reverse */
	if (!scaling_down) {
		for (i = 0; i < opp_table->required_opp_count; i++) {
			ret = _set_performance_state(dev, genpd_virt_devs[i], opp, i);
			if (ret)
				break;
		}
	} else {
		for (i = opp_table->required_opp_count - 1; i >= 0; i--) {
			ret = _set_performance_state(dev, genpd_virt_devs[i], opp, i);
			if (ret)
				break;
		}
	}

	mutex_unlock(&opp_table->genpd_virt_dev_lock);

	return ret;
}

/* This is only called for PM domain for now */
static int _set_required_opps(struct device *dev, struct opp_table *opp_table,
			      struct dev_pm_opp *opp, bool up)
{
	/* required-opps not fully initialized yet */
	if (lazy_linking_pending(opp_table))
		return -EBUSY;

	if (opp_table->set_required_opps)
		return opp_table->set_required_opps(dev, opp_table, opp, up);

	return 0;
}

/* Update set_required_opps handler */
void _update_set_required_opps(struct opp_table *opp_table)
{
	/* Already set */
	if (opp_table->set_required_opps)
		return;

	/* All required OPPs will belong to genpd or none */
	if (opp_table->required_opp_tables[0]->is_genpd)
		opp_table->set_required_opps = _opp_set_required_opps_genpd;
	else
		opp_table->set_required_opps = _opp_set_required_opps_generic;
}

static void _find_current_opp(struct device *dev, struct opp_table *opp_table)
{
	struct dev_pm_opp *opp = ERR_PTR(-ENODEV);
	unsigned long freq;

	if (!IS_ERR(opp_table->clk)) {
		freq = clk_get_rate(opp_table->clk);
		opp = _find_freq_ceil(opp_table, &freq);
	}

	/*
	 * Unable to find the current OPP? Pick the first from the list since
	 * it is in ascending order, otherwise the rest of the code will need
	 * to make special checks to validate current_opp.
	 */
	if (IS_ERR(opp)) {
		mutex_lock(&opp_table->lock);
		opp = list_first_entry(&opp_table->opp_list, struct dev_pm_opp, node);
		dev_pm_opp_get(opp);
		mutex_unlock(&opp_table->lock);
	}

	opp_table->current_opp = opp;
}

static int _disable_opp_table(struct device *dev, struct opp_table *opp_table)
{
	int ret;

	if (!opp_table->enabled)
		return 0;

	/*
	 * Some drivers need to support cases where some platforms may
	 * have OPP table for the device, while others don't and
	 * opp_set_rate() just needs to behave like clk_set_rate().
	 */
	if (!_get_opp_count(opp_table))
		return 0;

	ret = _set_opp_bw(opp_table, NULL, dev);
	if (ret)
		return ret;

	if (opp_table->regulators)
		regulator_disable(opp_table->regulators[0]);

	ret = _set_required_opps(dev, opp_table, NULL, false);

	opp_table->enabled = false;
	return ret;
}

static int _set_opp(struct device *dev, struct opp_table *opp_table,
		    struct dev_pm_opp *opp, void *clk_data, bool forced)
{
	struct dev_pm_opp *old_opp;
	int scaling_down, ret;

	if (unlikely(!opp))
		return _disable_opp_table(dev, opp_table);

	/* Find the currently set OPP if we don't know already */
	if (unlikely(!opp_table->current_opp))
		_find_current_opp(dev, opp_table);

	old_opp = opp_table->current_opp;

	/* Return early if nothing to do */
	if (!forced && old_opp == opp && opp_table->enabled) {
		dev_dbg_ratelimited(dev, "%s: OPPs are same, nothing to do\n", __func__);
		return 0;
	}

	dev_dbg(dev, "%s: switching OPP: Freq %lu -> %lu Hz, Level %u -> %u, Bw %u -> %u\n",
		__func__, old_opp->rates[0], opp->rates[0], old_opp->level,
		opp->level, old_opp->bandwidth ? old_opp->bandwidth[0].peak : 0,
		opp->bandwidth ? opp->bandwidth[0].peak : 0);

	scaling_down = _opp_compare_key(opp_table, old_opp, opp);
	if (scaling_down == -1)
		scaling_down = 0;

	/* Scaling up? Configure required OPPs before frequency */
	if (!scaling_down) {
		ret = _set_required_opps(dev, opp_table, opp, true);
		if (ret) {
			dev_err(dev, "Failed to set required opps: %d\n", ret);
			return ret;
		}

		ret = _set_opp_bw(opp_table, opp, dev);
		if (ret) {
			dev_err(dev, "Failed to set bw: %d\n", ret);
			return ret;
		}

		if (opp_table->config_regulators) {
			ret = opp_table->config_regulators(dev, old_opp, opp,
							   opp_table->regulators,
							   opp_table->regulator_count);
			if (ret) {
				dev_err(dev, "Failed to set regulator voltages: %d\n",
					ret);
				return ret;
			}
		}
	}

	if (opp_table->config_clks) {
		ret = opp_table->config_clks(dev, opp_table, opp, clk_data, scaling_down);
		if (ret)
			return ret;
	}

	/* Scaling down? Configure required OPPs after frequency */
	if (scaling_down) {
		if (opp_table->config_regulators) {
			ret = opp_table->config_regulators(dev, old_opp, opp,
							   opp_table->regulators,
							   opp_table->regulator_count);
			if (ret) {
				dev_err(dev, "Failed to set regulator voltages: %d\n",
					ret);
				return ret;
			}
		}

		ret = _set_opp_bw(opp_table, opp, dev);
		if (ret) {
			dev_err(dev, "Failed to set bw: %d\n", ret);
			return ret;
		}

		ret = _set_required_opps(dev, opp_table, opp, false);
		if (ret) {
			dev_err(dev, "Failed to set required opps: %d\n", ret);
			return ret;
		}
	}

	opp_table->enabled = true;
	dev_pm_opp_put(old_opp);

	/* Make sure current_opp doesn't get freed */
	dev_pm_opp_get(opp);
	opp_table->current_opp = opp;

	return ret;
}

/**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev: device for which we do this operation
 * @target_freq: frequency to achieve
 *
 * This configures the power-supplies to the levels specified by the OPP
 * corresponding to the target_freq, and programs the clock to a value <=
 * target_freq, as rounded by clk_round_rate(). A device wanting to run at
 * fmax should have already rounded to the target OPP's frequency.
 */
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
	struct opp_table *opp_table;
	unsigned long freq = 0, temp_freq;
	struct dev_pm_opp *opp = NULL;
	bool forced = false;
	int ret;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device's opp table doesn't exist\n", __func__);
		return PTR_ERR(opp_table);
	}

	if (target_freq) {
		/*
		 * For IO devices which require an OPP on some platforms/SoCs
		 * while just needing to scale the clock on some others
		 * we look for empty OPP tables with just a clock handle and
		 * scale only the clk. This makes dev_pm_opp_set_rate()
		 * equivalent to a clk_set_rate().
		 */
		if (!_get_opp_count(opp_table)) {
			ret = opp_table->config_clks(dev, opp_table, NULL,
						     &target_freq, false);
			goto put_opp_table;
		}

		freq = clk_round_rate(opp_table->clk, target_freq);
		if ((long)freq <= 0)
			freq = target_freq;

		/*
		 * The clock driver may support finer resolution of the
		 * frequencies than the OPP table, don't update the frequency we
		 * pass to clk_set_rate() here.
		 */
		temp_freq = freq;
		opp = _find_freq_ceil(opp_table, &temp_freq);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
				__func__, freq, ret);
			goto put_opp_table;
		}

		/*
		 * An OPP entry specifies the highest frequency at which other
		 * properties of the OPP entry apply. Even if the new OPP is
		 * the same as the old one, we may still reach here for a
		 * different value of the frequency. In such a case, do not
		 * abort but configure the hardware to the desired frequency
		 * forcefully.
		 */
		forced = opp_table->rate_clk_single != freq;
	}

	ret = _set_opp(dev, opp_table, opp, &freq, forced);

	if (freq)
		dev_pm_opp_put(opp);

put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
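
/*
 * Example (illustrative sketch only): a devfreq-style target() callback can
 * forward the requested rate directly; "foo_target" is a hypothetical driver
 * hook.
 *
 *	static int foo_target(struct device *dev, unsigned long *freq, u32 flags)
 *	{
 *		return dev_pm_opp_set_rate(dev, *freq);
 *	}
 */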

/**
 * dev_pm_opp_set_opp() - Configure device for OPP
 * @dev: device for which we do this operation
 * @opp: OPP to set to
 *
 * This configures the device based on the properties of the OPP passed to this
 * routine.
 *
 * Return: 0 on success, a negative error number otherwise.
 */
int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
		return PTR_ERR(opp_table);
	}

	ret = _set_opp(dev, opp_table, opp, NULL, false);
	dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_opp);
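
/*
 * Example (illustrative sketch only): program the OPP tagged as the suspend
 * OPP, if any, before entering system suspend.
 *
 *	opp = dev_pm_opp_find_freq_exact(dev,
 *			dev_pm_opp_get_suspend_opp_freq(dev), true);
 *	if (!IS_ERR(opp)) {
 *		ret = dev_pm_opp_set_opp(dev, opp);
 *		dev_pm_opp_put(opp);
 *	}
 */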

/* OPP-dev Helpers */
static void _remove_opp_dev(struct opp_device *opp_dev,
			    struct opp_table *opp_table)
{
	opp_debug_unregister(opp_dev, opp_table);
	list_del(&opp_dev->node);
	kfree(opp_dev);
}

struct opp_device *_add_opp_dev(const struct device *dev,
				struct opp_table *opp_table)
{
	struct opp_device *opp_dev;

	opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
	if (!opp_dev)
		return NULL;

	/* Initialize opp-dev */
	opp_dev->dev = dev;

	mutex_lock(&opp_table->lock);
	list_add(&opp_dev->node, &opp_table->dev_list);
	mutex_unlock(&opp_table->lock);

	/* Create debugfs entries for the opp_table */
	opp_debug_register(opp_dev, opp_table);

	return opp_dev;
}

static struct opp_table *_allocate_opp_table(struct device *dev, int index)
{
	struct opp_table *opp_table;
	struct opp_device *opp_dev;
	int ret;

	/*
	 * Allocate a new OPP table. In the infrequent case where a new
	 * device needs to be added, we pay this penalty.
	 */
	opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
	if (!opp_table)
		return ERR_PTR(-ENOMEM);

	mutex_init(&opp_table->lock);
	mutex_init(&opp_table->genpd_virt_dev_lock);
	INIT_LIST_HEAD(&opp_table->dev_list);
	INIT_LIST_HEAD(&opp_table->lazy);

	opp_table->clk = ERR_PTR(-ENODEV);

	/* Mark regulator count uninitialized */
	opp_table->regulator_count = -1;

	opp_dev = _add_opp_dev(dev, opp_table);
	if (!opp_dev) {
		ret = -ENOMEM;
		goto err;
	}

	_of_init_opp_table(opp_table, dev, index);

	/* Find interconnect path(s) for the device */
	ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
	if (ret) {
		if (ret == -EPROBE_DEFER)
			goto remove_opp_dev;

		dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
			 __func__, ret);
	}

	BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
	INIT_LIST_HEAD(&opp_table->opp_list);
	kref_init(&opp_table->kref);

	return opp_table;

remove_opp_dev:
	_of_clear_opp_table(opp_table);
	_remove_opp_dev(opp_dev, opp_table);
	mutex_destroy(&opp_table->genpd_virt_dev_lock);
	mutex_destroy(&opp_table->lock);
err:
	kfree(opp_table);
	return ERR_PTR(ret);
}

void _get_opp_table_kref(struct opp_table *opp_table)
{
	kref_get(&opp_table->kref);
}

static struct opp_table *_update_opp_table_clk(struct device *dev,
					       struct opp_table *opp_table,
					       bool getclk)
{
	int ret;

	/*
	 * Return early if we don't need to get clk or we have already done it
	 * earlier.
	 */
	if (!getclk || IS_ERR(opp_table) || !IS_ERR(opp_table->clk) ||
	    opp_table->clks)
		return opp_table;

	/* Find clk for the device */
	opp_table->clk = clk_get(dev, NULL);

	ret = PTR_ERR_OR_ZERO(opp_table->clk);
	if (!ret) {
		opp_table->config_clks = _opp_config_clk_single;
		opp_table->clk_count = 1;
		return opp_table;
	}

	if (ret == -ENOENT) {
		/*
		 * There are a few platforms which don't want the OPP core to
		 * manage the device's clock settings. In such cases neither
		 * does the platform provide the clks explicitly to us, nor
		 * does the DT contain a valid clk entry. The OPP nodes in DT
		 * may still contain the "opp-hz" property though, which we
		 * need to parse to allow the platform to find an OPP based on
		 * freq later on.
		 *
		 * This is a simple solution to take care of such corner cases,
		 * i.e. make the clk_count 1, which lets us allocate space for
		 * frequency in opp->rates and also parse the entries in DT.
		 */
		opp_table->clk_count = 1;

		dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
		return opp_table;
	}

	dev_pm_opp_put_opp_table(opp_table);
	dev_err_probe(dev, ret, "Couldn't find clock\n");

	return ERR_PTR(ret);
}

/*
 * We need to make sure that the OPP table for a device doesn't get added twice,
 * if this routine gets called in parallel with the same device pointer.
 *
 * The simplest way to enforce that is to perform everything (find existing
 * table and if not found, create a new one) under the opp_table_lock, so only
 * one creator gets access to it. But that expands the critical section under
 * the lock and may end up causing circular dependencies with frameworks like
 * debugfs, interconnect or the clock framework, as they may be direct or
 * indirect users of the OPP core.
 *
 * And for that reason we have to go for a slightly tricky implementation here,
 * which uses the opp_tables_busy flag to indicate if another creator is in the
 * middle of adding an OPP table and others should wait for it to finish.
 */
struct opp_table *_add_opp_table_indexed(struct device *dev, int index,
					 bool getclk)
{
	struct opp_table *opp_table;

again:
	mutex_lock(&opp_table_lock);

	opp_table = _find_opp_table_unlocked(dev);
	if (!IS_ERR(opp_table))
		goto unlock;

	/*
	 * The opp_tables list or an OPP table's dev_list is getting updated by
	 * another user, wait for it to finish.
	 */
	if (unlikely(opp_tables_busy)) {
		mutex_unlock(&opp_table_lock);
		cpu_relax();
		goto again;
	}

	opp_tables_busy = true;
	opp_table = _managed_opp(dev, index);

	/* Drop the lock to reduce the size of critical section */
	mutex_unlock(&opp_table_lock);

	if (opp_table) {
		if (!_add_opp_dev(dev, opp_table)) {
			dev_pm_opp_put_opp_table(opp_table);
			opp_table = ERR_PTR(-ENOMEM);
		}

		mutex_lock(&opp_table_lock);
	} else {
		opp_table = _allocate_opp_table(dev, index);

		mutex_lock(&opp_table_lock);
		if (!IS_ERR(opp_table))
			list_add(&opp_table->node, &opp_tables);
	}

	opp_tables_busy = false;

unlock:
	mutex_unlock(&opp_table_lock);

	return _update_opp_table_clk(dev, opp_table, getclk);
}

static struct opp_table *_add_opp_table(struct device *dev, bool getclk)
{
	return _add_opp_table_indexed(dev, 0, getclk);
}

struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
{
	return _find_opp_table(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);

static void _opp_table_kref_release(struct kref *kref)
{
	struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
	struct opp_device *opp_dev, *temp;
	int i;

	/* Drop the lock as soon as we can */
	list_del(&opp_table->node);
	mutex_unlock(&opp_table_lock);

	if (opp_table->current_opp)
		dev_pm_opp_put(opp_table->current_opp);

	_of_clear_opp_table(opp_table);

	/* Release automatically acquired single clk */
	if (!IS_ERR(opp_table->clk))
		clk_put(opp_table->clk);

	if (opp_table->paths) {
		for (i = 0; i < opp_table->path_count; i++)
			icc_put(opp_table->paths[i]);
		kfree(opp_table->paths);
	}

	WARN_ON(!list_empty(&opp_table->opp_list));

	list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node)
		_remove_opp_dev(opp_dev, opp_table);

	mutex_destroy(&opp_table->genpd_virt_dev_lock);
	mutex_destroy(&opp_table->lock);
	kfree(opp_table);
}

void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
{
	kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
		       &opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_opp_table);
1651
_opp_free(struct dev_pm_opp * opp)1652 void _opp_free(struct dev_pm_opp *opp)
1653 {
1654 kfree(opp);
1655 }
1656
_opp_kref_release(struct kref * kref)1657 static void _opp_kref_release(struct kref *kref)
1658 {
1659 struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
1660 struct opp_table *opp_table = opp->opp_table;
1661
1662 list_del(&opp->node);
1663 mutex_unlock(&opp_table->lock);
1664
1665 /*
1666 * Notify the changes in the availability of the operable
1667 * frequency/voltage list.
1668 */
1669 blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
1670 _of_clear_opp(opp_table, opp);
1671 opp_debug_remove_one(opp);
1672 kfree(opp);
1673 }
1674
1675 void dev_pm_opp_get(struct dev_pm_opp *opp)
1676 {
1677 kref_get(&opp->kref);
1678 }
1679
1680 void dev_pm_opp_put(struct dev_pm_opp *opp)
1681 {
1682 kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
1683 }
1684 EXPORT_SYMBOL_GPL(dev_pm_opp_put);
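/*
 * Illustrative sketch (not part of this file): the reference pairing a
 * consumer relies on. Lookup helpers such as dev_pm_opp_find_freq_exact()
 * take a reference on the returned OPP, which must be dropped with
 * dev_pm_opp_put() once the caller is done with it (the frequency below is
 * arbitrary):
 *
 *	struct dev_pm_opp *opp;
 *
 *	opp = dev_pm_opp_find_freq_exact(dev, 800000000, true);
 *	if (!IS_ERR(opp)) {
 *		unsigned long volt = dev_pm_opp_get_voltage(opp);
 *
 *		dev_pm_opp_put(opp);
 *	}
 */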
1685
1686 /**
1687 * dev_pm_opp_remove() - Remove an OPP from OPP table
1688 * @dev: device for which we do this operation
1689 * @freq: OPP to remove with matching 'freq'
1690 *
1691 * This function removes an opp from the opp table.
1692 */
1693 void dev_pm_opp_remove(struct device *dev, unsigned long freq)
1694 {
1695 struct dev_pm_opp *opp = NULL, *iter;
1696 struct opp_table *opp_table;
1697
1698 opp_table = _find_opp_table(dev);
1699 if (IS_ERR(opp_table))
1700 return;
1701
1702 if (!assert_single_clk(opp_table, 0))
1703 goto put_table;
1704
1705 mutex_lock(&opp_table->lock);
1706
1707 list_for_each_entry(iter, &opp_table->opp_list, node) {
1708 if (iter->rates[0] == freq) {
1709 opp = iter;
1710 break;
1711 }
1712 }
1713
1714 mutex_unlock(&opp_table->lock);
1715
1716 if (opp) {
1717 dev_pm_opp_put(opp);
1718
1719 /* Drop the reference taken by dev_pm_opp_add() */
1720 dev_pm_opp_put_opp_table(opp_table);
1721 } else {
1722 dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
1723 __func__, freq);
1724 }
1725
1726 put_table:
1727 /* Drop the reference taken by _find_opp_table() */
1728 dev_pm_opp_put_opp_table(opp_table);
1729 }
1730 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
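/*
 * Illustrative sketch: a dynamic OPP created with dev_pm_opp_add() is dropped
 * by a matching dev_pm_opp_remove() call for the same frequency (the values
 * below are arbitrary):
 *
 *	ret = dev_pm_opp_add(dev, 1200000000, 1100000);
 *	...
 *	dev_pm_opp_remove(dev, 1200000000);
 */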
1731
1732 static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table,
1733 bool dynamic)
1734 {
1735 struct dev_pm_opp *opp = NULL, *temp;
1736
1737 mutex_lock(&opp_table->lock);
1738 list_for_each_entry(temp, &opp_table->opp_list, node) {
1739 /*
1740 * The OPP core must drop the refcount exactly once for each OPP;
1741 * the "removed" flag ensures that.
1742 */
1743 if (!temp->removed && dynamic == temp->dynamic) {
1744 opp = temp;
1745 break;
1746 }
1747 }
1748
1749 mutex_unlock(&opp_table->lock);
1750 return opp;
1751 }
1752
1753 /*
1754 * Can't call dev_pm_opp_put() from under the lock, as debugfs removal needs
1755 * to happen locklessly to avoid circular dependency issues. This routine must be
1756 * called without the opp_table->lock held.
1757 */
1758 static void _opp_remove_all(struct opp_table *opp_table, bool dynamic)
1759 {
1760 struct dev_pm_opp *opp;
1761
1762 while ((opp = _opp_get_next(opp_table, dynamic))) {
1763 opp->removed = true;
1764 dev_pm_opp_put(opp);
1765
1766 /* Drop the references taken by dev_pm_opp_add() */
1767 if (dynamic)
1768 dev_pm_opp_put_opp_table(opp_table);
1769 }
1770 }
1771
1772 bool _opp_remove_all_static(struct opp_table *opp_table)
1773 {
1774 mutex_lock(&opp_table->lock);
1775
1776 if (!opp_table->parsed_static_opps) {
1777 mutex_unlock(&opp_table->lock);
1778 return false;
1779 }
1780
1781 if (--opp_table->parsed_static_opps) {
1782 mutex_unlock(&opp_table->lock);
1783 return true;
1784 }
1785
1786 mutex_unlock(&opp_table->lock);
1787
1788 _opp_remove_all(opp_table, false);
1789 return true;
1790 }
1791
1792 /**
1793 * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
1794 * @dev: device for which we do this operation
1795 *
1796 * This function removes all dynamically created OPPs from the opp table.
1797 */
1798 void dev_pm_opp_remove_all_dynamic(struct device *dev)
1799 {
1800 struct opp_table *opp_table;
1801
1802 opp_table = _find_opp_table(dev);
1803 if (IS_ERR(opp_table))
1804 return;
1805
1806 _opp_remove_all(opp_table, true);
1807
1808 /* Drop the reference taken by _find_opp_table() */
1809 dev_pm_opp_put_opp_table(opp_table);
1810 }
1811 EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);
1812
1813 struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table)
1814 {
1815 struct dev_pm_opp *opp;
1816 int supply_count, supply_size, icc_size, clk_size;
1817
1818 /* Allocate space for at least one supply */
1819 supply_count = opp_table->regulator_count > 0 ?
1820 opp_table->regulator_count : 1;
1821 supply_size = sizeof(*opp->supplies) * supply_count;
1822 clk_size = sizeof(*opp->rates) * opp_table->clk_count;
1823 icc_size = sizeof(*opp->bandwidth) * opp_table->path_count;
1824
1825 /* allocate new OPP node and supplies structures */
1826 opp = kzalloc(sizeof(*opp) + supply_size + clk_size + icc_size, GFP_KERNEL);
1827 if (!opp)
1828 return NULL;
1829
1830 /* Put the supplies, bw and clock at the end of the OPP structure */
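/*
 * Resulting layout of the single allocation (illustrative):
 *
 * opp | supplies[supply_count] | rates[clk_count] | bandwidth[path_count]
 */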
1831 opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
1832
1833 opp->rates = (unsigned long *)(opp->supplies + supply_count);
1834
1835 if (icc_size)
1836 opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->rates + opp_table->clk_count);
1837
1838 INIT_LIST_HEAD(&opp->node);
1839
1840 return opp;
1841 }
1842
1843 static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
1844 struct opp_table *opp_table)
1845 {
1846 struct regulator *reg;
1847 int i;
1848
1849 if (!opp_table->regulators)
1850 return true;
1851
1852 for (i = 0; i < opp_table->regulator_count; i++) {
1853 reg = opp_table->regulators[i];
1854
1855 if (!regulator_is_supported_voltage(reg,
1856 opp->supplies[i].u_volt_min,
1857 opp->supplies[i].u_volt_max)) {
1858 pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
1859 __func__, opp->supplies[i].u_volt_min,
1860 opp->supplies[i].u_volt_max);
1861 return false;
1862 }
1863 }
1864
1865 return true;
1866 }
1867
1868 static int _opp_compare_rate(struct opp_table *opp_table,
1869 struct dev_pm_opp *opp1, struct dev_pm_opp *opp2)
1870 {
1871 int i;
1872
1873 for (i = 0; i < opp_table->clk_count; i++) {
1874 if (opp1->rates[i] != opp2->rates[i])
1875 return opp1->rates[i] < opp2->rates[i] ? -1 : 1;
1876 }
1877
1878 /* Same rates for both OPPs */
1879 return 0;
1880 }
1881
1882 static int _opp_compare_bw(struct opp_table *opp_table, struct dev_pm_opp *opp1,
1883 struct dev_pm_opp *opp2)
1884 {
1885 int i;
1886
1887 for (i = 0; i < opp_table->path_count; i++) {
1888 if (opp1->bandwidth[i].peak != opp2->bandwidth[i].peak)
1889 return opp1->bandwidth[i].peak < opp2->bandwidth[i].peak ? -1 : 1;
1890 }
1891
1892 /* Same bw for both OPPs */
1893 return 0;
1894 }
1895
1896 /*
1897 * Returns
1898 * 0: opp1 == opp2
1899 * 1: opp1 > opp2
1900 * -1: opp1 < opp2
1901 */
1902 int _opp_compare_key(struct opp_table *opp_table, struct dev_pm_opp *opp1,
1903 struct dev_pm_opp *opp2)
1904 {
1905 int ret;
1906
1907 ret = _opp_compare_rate(opp_table, opp1, opp2);
1908 if (ret)
1909 return ret;
1910
1911 ret = _opp_compare_bw(opp_table, opp1, opp2);
1912 if (ret)
1913 return ret;
1914
1915 if (opp1->level != opp2->level)
1916 return opp1->level < opp2->level ? -1 : 1;
1917
1918 /* Duplicate OPPs */
1919 return 0;
1920 }
1921
1922 static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
1923 struct opp_table *opp_table,
1924 struct list_head **head)
1925 {
1926 struct dev_pm_opp *opp;
1927 int opp_cmp;
1928
1929 /*
1930 * Insert new OPP in order of increasing frequency and discard if
1931 * already present.
1932 *
1933 * Need to use &opp_table->opp_list in the condition part of the 'for'
1934 * loop; don't replace it with 'head', otherwise the loop would never
1935 * terminate.
1936 */
1937 list_for_each_entry(opp, &opp_table->opp_list, node) {
1938 opp_cmp = _opp_compare_key(opp_table, new_opp, opp);
1939 if (opp_cmp > 0) {
1940 *head = &opp->node;
1941 continue;
1942 }
1943
1944 if (opp_cmp < 0)
1945 return 0;
1946
1947 /* Duplicate OPPs */
1948 dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
1949 __func__, opp->rates[0], opp->supplies[0].u_volt,
1950 opp->available, new_opp->rates[0],
1951 new_opp->supplies[0].u_volt, new_opp->available);
1952
1953 /* Should we compare voltages for all regulators here? */
1954 return opp->available &&
1955 new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST;
1956 }
1957
1958 return 0;
1959 }
1960
1961 void _required_opps_available(struct dev_pm_opp *opp, int count)
1962 {
1963 int i;
1964
1965 for (i = 0; i < count; i++) {
1966 if (opp->required_opps[i]->available)
1967 continue;
1968
1969 opp->available = false;
1970 pr_warn("%s: OPP not supported by required OPP %pOF (%lu)\n",
1971 __func__, opp->required_opps[i]->np, opp->rates[0]);
1972 return;
1973 }
1974 }
1975
1976 /*
1977 * Returns:
1978 * 0: On success. An appropriate warning is printed for duplicate OPPs.
1979 * -EBUSY: For OPP with same freq/volt and is available. The callers of
1980 * _opp_add() must return 0 if they receive -EBUSY from it. This is to make
1981 * sure we don't print error messages unnecessarily if different parts of
1982 * kernel try to initialize the OPP table.
1983 * -EEXIST: For OPP with same freq but different volt or is unavailable. This
1984 * should be considered an error by the callers of _opp_add().
1985 */
1986 int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
1987 struct opp_table *opp_table)
1988 {
1989 struct list_head *head;
1990 int ret;
1991
1992 mutex_lock(&opp_table->lock);
1993 head = &opp_table->opp_list;
1994
1995 ret = _opp_is_duplicate(dev, new_opp, opp_table, &head);
1996 if (ret) {
1997 mutex_unlock(&opp_table->lock);
1998 return ret;
1999 }
2000
2001 list_add(&new_opp->node, head);
2002 mutex_unlock(&opp_table->lock);
2003
2004 new_opp->opp_table = opp_table;
2005 kref_init(&new_opp->kref);
2006
2007 opp_debug_create_one(new_opp, opp_table);
2008
2009 if (!_opp_supported_by_regulators(new_opp, opp_table)) {
2010 new_opp->available = false;
2011 dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
2012 __func__, new_opp->rates[0]);
2013 }
2014
2015 /* required-opps not fully initialized yet */
2016 if (lazy_linking_pending(opp_table))
2017 return 0;
2018
2019 _required_opps_available(new_opp, opp_table->required_opp_count);
2020
2021 return 0;
2022 }
2023
2024 /**
2025 * _opp_add_v1() - Allocate an OPP based on v1 bindings.
2026 * @opp_table: OPP table
2027 * @dev: device for which we do this operation
2028 * @freq: Frequency in Hz for this OPP
2029 * @u_volt: Voltage in uVolts for this OPP
2030 * @dynamic: True if the OPP is being added dynamically.
2031 *
2032 * This function adds an opp definition to the opp table and returns status.
2033 * The opp is made available by default and it can be controlled using
2034 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
2035 *
2036 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
2037 * and freed by dev_pm_opp_of_remove_table.
2038 *
2039 * Return:
2040 * 0 On success OR
2041 * Duplicate OPPs (both freq and volt are same) and opp->available
2042 * -EEXIST Freq are same and volt are different OR
2043 * Duplicate OPPs (both freq and volt are same) and !opp->available
2044 * -ENOMEM Memory allocation failure
2045 */
2046 int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
2047 unsigned long freq, long u_volt, bool dynamic)
2048 {
2049 struct dev_pm_opp *new_opp;
2050 unsigned long tol;
2051 int ret;
2052
2053 if (!assert_single_clk(opp_table, 0))
2054 return -EINVAL;
2055
2056 new_opp = _opp_allocate(opp_table);
2057 if (!new_opp)
2058 return -ENOMEM;
2059
2060 /* populate the opp table */
2061 new_opp->rates[0] = freq;
2062 tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
2063 new_opp->supplies[0].u_volt = u_volt;
2064 new_opp->supplies[0].u_volt_min = u_volt - tol;
2065 new_opp->supplies[0].u_volt_max = u_volt + tol;
2066 new_opp->available = true;
2067 new_opp->dynamic = dynamic;
2068
2069 ret = _opp_add(dev, new_opp, opp_table);
2070 if (ret) {
2071 /* Don't return error for duplicate OPPs */
2072 if (ret == -EBUSY)
2073 ret = 0;
2074 goto free_opp;
2075 }
2076
2077 /*
2078 * Notify the changes in the availability of the operable
2079 * frequency/voltage list.
2080 */
2081 blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
2082 return 0;
2083
2084 free_opp:
2085 _opp_free(new_opp);
2086
2087 return ret;
2088 }
2089
2090 /**
2091 * _opp_set_supported_hw() - Set supported platforms
2092 * @opp_table: OPP table for which supported-hw has to be set.
2093 * @versions: Array of hierarchy of versions to match.
2094 * @count: Number of elements in the array.
2095 *
2096 * This is required only for the V2 bindings, and it enables a platform to
2097 * specify the hierarchy of versions it supports. OPP layer will then enable
2098 * OPPs, which are available for those versions, based on its 'opp-supported-hw'
2099 * property.
2100 */
2101 static int _opp_set_supported_hw(struct opp_table *opp_table,
2102 const u32 *versions, unsigned int count)
2103 {
2104 /* Another CPU that shares the OPP table has set the property? */
2105 if (opp_table->supported_hw)
2106 return 0;
2107
2108 opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
2109 GFP_KERNEL);
2110 if (!opp_table->supported_hw)
2111 return -ENOMEM;
2112
2113 opp_table->supported_hw_count = count;
2114
2115 return 0;
2116 }
2117
2118 /**
2119 * _opp_put_supported_hw() - Releases resources blocked for supported hw
2120 * @opp_table: OPP table returned by _opp_set_supported_hw().
2121 *
2122 * This is required only for the V2 bindings, and is called for a matching
2123 * _opp_set_supported_hw(). Until this is called, the opp_table structure
2124 * will not be freed.
2125 */
2126 static void _opp_put_supported_hw(struct opp_table *opp_table)
2127 {
2128 if (opp_table->supported_hw) {
2129 kfree(opp_table->supported_hw);
2130 opp_table->supported_hw = NULL;
2131 opp_table->supported_hw_count = 0;
2132 }
2133 }
2134
2135 /**
2136 * _opp_set_prop_name() - Set prop-extn name
2137 * @opp_table: OPP table for which the prop-name has to be set.
2138 * @name: name to postfix to properties.
2139 *
2140 * This is required only for the V2 bindings, and it enables a platform to
2141 * specify the extn to be used for certain property names. The properties to
2142 * which the extension will apply are opp-microvolt and opp-microamp. OPP core
2143 * should postfix the property name with -<name> while looking for them.
2144 */
2145 static int _opp_set_prop_name(struct opp_table *opp_table, const char *name)
2146 {
2147 /* Another CPU that shares the OPP table has set the property? */
2148 if (!opp_table->prop_name) {
2149 opp_table->prop_name = kstrdup(name, GFP_KERNEL);
2150 if (!opp_table->prop_name)
2151 return -ENOMEM;
2152 }
2153
2154 return 0;
2155 }
2156
2157 /**
2158 * _opp_put_prop_name() - Releases resources blocked for prop-name
2159 * @opp_table: OPP table returned by _opp_set_prop_name().
2160 *
2161 * This is required only for the V2 bindings, and is called for a matching
2162 * _opp_set_prop_name(). Until this is called, the opp_table structure
2163 * will not be freed.
2164 */
2165 static void _opp_put_prop_name(struct opp_table *opp_table)
2166 {
2167 if (opp_table->prop_name) {
2168 kfree(opp_table->prop_name);
2169 opp_table->prop_name = NULL;
2170 }
2171 }
2172
2173 /**
2174 * _opp_set_regulators() - Set regulator names for the device
2175 * @opp_table: OPP table for the device.
2176 * @dev: Device for which regulator names are being set.
2177 * @names: NULL terminated array of pointers to the regulator names.
2178 *
2179 * In order to support OPP switching, the OPP layer needs to know the names of
2180 * the device's regulators, as the core would be required to switch voltages as
2181 * well.
2182 *
2183 * This must be called before any OPPs are initialized for the device.
2184 */
2185 static int _opp_set_regulators(struct opp_table *opp_table, struct device *dev,
2186 const char * const names[])
2187 {
2188 const char * const *temp = names;
2189 struct regulator *reg;
2190 int count = 0, ret, i;
2191
2192 /* Count number of regulators */
2193 while (*temp++)
2194 count++;
2195
2196 if (!count)
2197 return -EINVAL;
2198
2199 /* Another CPU that shares the OPP table has set the regulators? */
2200 if (opp_table->regulators)
2201 return 0;
2202
2203 opp_table->regulators = kmalloc_array(count,
2204 sizeof(*opp_table->regulators),
2205 GFP_KERNEL);
2206 if (!opp_table->regulators)
2207 return -ENOMEM;
2208
2209 for (i = 0; i < count; i++) {
2210 reg = regulator_get_optional(dev, names[i]);
2211 if (IS_ERR(reg)) {
2212 ret = dev_err_probe(dev, PTR_ERR(reg),
2213 "%s: no regulator (%s) found\n",
2214 __func__, names[i]);
2215 goto free_regulators;
2216 }
2217
2218 opp_table->regulators[i] = reg;
2219 }
2220
2221 opp_table->regulator_count = count;
2222
2223 /* Set generic config_regulators() for single regulators here */
2224 if (count == 1)
2225 opp_table->config_regulators = _opp_config_regulator_single;
2226
2227 return 0;
2228
2229 free_regulators:
2230 while (i != 0)
2231 regulator_put(opp_table->regulators[--i]);
2232
2233 kfree(opp_table->regulators);
2234 opp_table->regulators = NULL;
2235 opp_table->regulator_count = -1;
2236
2237 return ret;
2238 }
2239
2240 /**
2241 * _opp_put_regulators() - Releases resources blocked for regulator
2242 * @opp_table: OPP table returned from _opp_set_regulators().
2243 */
2244 static void _opp_put_regulators(struct opp_table *opp_table)
2245 {
2246 int i;
2247
2248 if (!opp_table->regulators)
2249 return;
2250
2251 if (opp_table->enabled) {
2252 for (i = opp_table->regulator_count - 1; i >= 0; i--)
2253 regulator_disable(opp_table->regulators[i]);
2254 }
2255
2256 for (i = opp_table->regulator_count - 1; i >= 0; i--)
2257 regulator_put(opp_table->regulators[i]);
2258
2259 kfree(opp_table->regulators);
2260 opp_table->regulators = NULL;
2261 opp_table->regulator_count = -1;
2262 }
2263
2264 static void _put_clks(struct opp_table *opp_table, int count)
2265 {
2266 int i;
2267
2268 for (i = count - 1; i >= 0; i--)
2269 clk_put(opp_table->clks[i]);
2270
2271 kfree(opp_table->clks);
2272 opp_table->clks = NULL;
2273 }
2274
2275 /**
2276 * _opp_set_clknames() - Set clk names for the device
2277 * @dev: Device for which clk names are being set.
2278 * @names: NULL terminated array of clk names.
2279 *
2280 * In order to support OPP switching, OPP layer needs to get pointers to the
2281 * clocks for the device. Simple cases work fine without using this routine
2282 * (i.e. by passing connection-id as NULL), but for a device with multiple
2283 * clocks available, the OPP core needs to know the exact names of the clks to
2284 * use.
2285 *
2286 * This must be called before any OPPs are initialized for the device.
2287 */
2288 static int _opp_set_clknames(struct opp_table *opp_table, struct device *dev,
2289 const char * const names[],
2290 config_clks_t config_clks)
2291 {
2292 const char * const *temp = names;
2293 int count = 0, ret, i;
2294 struct clk *clk;
2295
2296 /* Count number of clks */
2297 while (*temp++)
2298 count++;
2299
2300 /*
2301 * This is a special case where we have a single clock, whose connection
2302 * id name is NULL, i.e. the first two entries in the array are NULL.
2303 */
2304 if (!count && !names[1])
2305 count = 1;
2306
2307 /* Fail early for invalid configurations */
2308 if (!count || (!config_clks && count > 1))
2309 return -EINVAL;
2310
2311 /* Another CPU that shares the OPP table has set the clkname? */
2312 if (opp_table->clks)
2313 return 0;
2314
2315 opp_table->clks = kmalloc_array(count, sizeof(*opp_table->clks),
2316 GFP_KERNEL);
2317 if (!opp_table->clks)
2318 return -ENOMEM;
2319
2320 /* Find clks for the device */
2321 for (i = 0; i < count; i++) {
2322 clk = clk_get(dev, names[i]);
2323 if (IS_ERR(clk)) {
2324 ret = dev_err_probe(dev, PTR_ERR(clk),
2325 "%s: Couldn't find clock with name: %s\n",
2326 __func__, names[i]);
2327 goto free_clks;
2328 }
2329
2330 opp_table->clks[i] = clk;
2331 }
2332
2333 opp_table->clk_count = count;
2334 opp_table->config_clks = config_clks;
2335
2336 /* Set generic single clk set here */
2337 if (count == 1) {
2338 if (!opp_table->config_clks)
2339 opp_table->config_clks = _opp_config_clk_single;
2340
2341 /*
2342 * We could have just dropped the "clk" field and used "clks"
2343 * everywhere. Instead we kept the "clk" field around for
2344 * following reasons:
2345 *
2346 * - avoiding clks[0] everywhere else.
2347 * - not running single clk helpers for multiple clk usecase by
2348 * mistake.
2349 *
2350 * Since this is single-clk case, just update the clk pointer
2351 * too.
2352 */
2353 opp_table->clk = opp_table->clks[0];
2354 }
2355
2356 return 0;
2357
2358 free_clks:
2359 _put_clks(opp_table, i);
2360 return ret;
2361 }
2362
2363 /**
2364 * _opp_put_clknames() - Releases resources blocked for clks.
2365 * @opp_table: OPP table returned from _opp_set_clknames().
2366 */
2367 static void _opp_put_clknames(struct opp_table *opp_table)
2368 {
2369 if (!opp_table->clks)
2370 return;
2371
2372 opp_table->config_clks = NULL;
2373 opp_table->clk = ERR_PTR(-ENODEV);
2374
2375 _put_clks(opp_table, opp_table->clk_count);
2376 }
2377
2378 /**
2379 * _opp_set_config_regulators_helper() - Register custom set regulator helper.
2380 * @dev: Device for which the helper is getting registered.
2381 * @config_regulators: Custom set regulator helper.
2382 *
2383 * This is useful to support platforms with multiple regulators per device.
2384 *
2385 * This must be called before any OPPs are initialized for the device.
2386 */
2387 static int _opp_set_config_regulators_helper(struct opp_table *opp_table,
2388 struct device *dev, config_regulators_t config_regulators)
2389 {
2390 /* Another CPU that shares the OPP table has set the helper? */
2391 if (!opp_table->config_regulators)
2392 opp_table->config_regulators = config_regulators;
2393
2394 return 0;
2395 }
2396
2397 /**
2398 * _opp_put_config_regulators_helper() - Releases resources blocked for
2399 * config_regulators helper.
2400 * @opp_table: OPP table returned from _opp_set_config_regulators_helper().
2401 *
2402 * Release resources blocked for platform specific config_regulators helper.
2403 */
2404 static void _opp_put_config_regulators_helper(struct opp_table *opp_table)
2405 {
2406 if (opp_table->config_regulators)
2407 opp_table->config_regulators = NULL;
2408 }
2409
2410 static void _detach_genpd(struct opp_table *opp_table)
2411 {
2412 int index;
2413
2414 if (!opp_table->genpd_virt_devs)
2415 return;
2416
2417 for (index = 0; index < opp_table->required_opp_count; index++) {
2418 if (!opp_table->genpd_virt_devs[index])
2419 continue;
2420
2421 dev_pm_domain_detach(opp_table->genpd_virt_devs[index], false);
2422 opp_table->genpd_virt_devs[index] = NULL;
2423 }
2424
2425 kfree(opp_table->genpd_virt_devs);
2426 opp_table->genpd_virt_devs = NULL;
2427 }
2428
2429 /**
2430 * _opp_attach_genpd - Attach genpd(s) for the device and save virtual device pointer
2431 * @dev: Consumer device for which the genpd is getting attached.
2432 * @names: NULL terminated array of pointers containing names of genpds to attach.
2433 * @virt_devs: Pointer to return the array of virtual devices.
2434 *
2435 * Multiple generic power domains for a device are supported with the help of
2436 * virtual genpd devices, which are created for each consumer device - genpd
2437 * pair. These are the device structures which are attached to the power domain
2438 * and are required by the OPP core to set the performance state of the genpd.
2439 * The same API also works for the case where single genpd is available and so
2440 * we don't need to support that separately.
2441 *
2442 * This helper will normally be called by the consumer driver of the device
2443 * "dev", as only that has details of the genpd names.
2444 *
2445 * This helper needs to be called once with a list of all genpds to attach.
2446 * Otherwise the original device structure will be used instead by the OPP core.
2447 *
2448 * The order of entries in the names array must match the order in which
2449 * "required-opps" are added in DT.
2450 */
2451 static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev,
2452 const char * const *names, struct device ***virt_devs)
2453 {
2454 struct device *virt_dev;
2455 int index = 0, ret = -EINVAL;
2456 const char * const *name = names;
2457
2458 if (opp_table->genpd_virt_devs)
2459 return 0;
2460
2461 /*
2462 * If the genpd's OPP table isn't already initialized, parsing of the
2463 * required-opps fails for dev. We should retry this after the genpd's OPP
2464 * table is added.
2465 */
2466 if (!opp_table->required_opp_count)
2467 return -EPROBE_DEFER;
2468
2469 mutex_lock(&opp_table->genpd_virt_dev_lock);
2470
2471 opp_table->genpd_virt_devs = kcalloc(opp_table->required_opp_count,
2472 sizeof(*opp_table->genpd_virt_devs),
2473 GFP_KERNEL);
2474 if (!opp_table->genpd_virt_devs)
2475 goto unlock;
2476
2477 while (*name) {
2478 if (index >= opp_table->required_opp_count) {
2479 dev_err(dev, "Index can't be greater than required-opp-count - 1, %s (%d : %d)\n",
2480 *name, opp_table->required_opp_count, index);
2481 goto err;
2482 }
2483
2484 virt_dev = dev_pm_domain_attach_by_name(dev, *name);
2485 if (IS_ERR_OR_NULL(virt_dev)) {
2486 ret = virt_dev ? PTR_ERR(virt_dev) : -ENODEV;
2487 dev_err(dev, "Couldn't attach to pm_domain: %d\n", ret);
2488 goto err;
2489 }
2490
2491 opp_table->genpd_virt_devs[index] = virt_dev;
2492 index++;
2493 name++;
2494 }
2495
2496 if (virt_devs)
2497 *virt_devs = opp_table->genpd_virt_devs;
2498 mutex_unlock(&opp_table->genpd_virt_dev_lock);
2499
2500 return 0;
2501
2502 err:
2503 _detach_genpd(opp_table);
2504 unlock:
2505 mutex_unlock(&opp_table->genpd_virt_dev_lock);
2506 return ret;
2508 }
2509
2510 /**
2511 * _opp_detach_genpd() - Detach genpd(s) from the device.
2512 * @opp_table: OPP table returned by _opp_attach_genpd().
2513 *
2514 * This detaches the genpd(s), resets the virtual device pointers, and puts the
2515 * OPP table.
2516 */
2517 static void _opp_detach_genpd(struct opp_table *opp_table)
2518 {
2519 /*
2520 * Acquire genpd_virt_dev_lock to make sure virt_dev isn't getting
2521 * used in parallel.
2522 */
2523 mutex_lock(&opp_table->genpd_virt_dev_lock);
2524 _detach_genpd(opp_table);
2525 mutex_unlock(&opp_table->genpd_virt_dev_lock);
2526 }
2527
2528 static void _opp_clear_config(struct opp_config_data *data)
2529 {
2530 if (data->flags & OPP_CONFIG_GENPD)
2531 _opp_detach_genpd(data->opp_table);
2532 if (data->flags & OPP_CONFIG_REGULATOR)
2533 _opp_put_regulators(data->opp_table);
2534 if (data->flags & OPP_CONFIG_SUPPORTED_HW)
2535 _opp_put_supported_hw(data->opp_table);
2536 if (data->flags & OPP_CONFIG_REGULATOR_HELPER)
2537 _opp_put_config_regulators_helper(data->opp_table);
2538 if (data->flags & OPP_CONFIG_PROP_NAME)
2539 _opp_put_prop_name(data->opp_table);
2540 if (data->flags & OPP_CONFIG_CLK)
2541 _opp_put_clknames(data->opp_table);
2542
2543 dev_pm_opp_put_opp_table(data->opp_table);
2544 kfree(data);
2545 }
2546
2547 /**
2548 * dev_pm_opp_set_config() - Set OPP configuration for the device.
2549 * @dev: Device for which configuration is being set.
2550 * @config: OPP configuration.
2551 *
2552 * This allows all device OPP configurations to be performed at once.
2553 *
2554 * This must be called before any OPPs are initialized for the device. This may
2555 * be called multiple times for the same OPP table, for example once for each
2556 * CPU that shares the same table. This must be balanced by the same number of
2557 * calls to dev_pm_opp_clear_config() in order to free the OPP table properly.
2558 *
2559 * This returns a token to the caller, which must be passed to
2560 * dev_pm_opp_clear_config() to free the resources later. The value of the
2561 * returned token will be >= 1 for success and negative for errors. The minimum
2562 * value of 1 is chosen here to make it easy for callers to manage the resource.
2563 */
2564 int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
2565 {
2566 struct opp_table *opp_table;
2567 struct opp_config_data *data;
2568 unsigned int id;
2569 int ret;
2570
2571 data = kmalloc(sizeof(*data), GFP_KERNEL);
2572 if (!data)
2573 return -ENOMEM;
2574
2575 opp_table = _add_opp_table(dev, false);
2576 if (IS_ERR(opp_table)) {
2577 kfree(data);
2578 return PTR_ERR(opp_table);
2579 }
2580
2581 data->opp_table = opp_table;
2582 data->flags = 0;
2583
2584 /* This should be called before OPPs are initialized */
2585 if (WARN_ON(!list_empty(&opp_table->opp_list))) {
2586 ret = -EBUSY;
2587 goto err;
2588 }
2589
2590 /* Configure clocks */
2591 if (config->clk_names) {
2592 ret = _opp_set_clknames(opp_table, dev, config->clk_names,
2593 config->config_clks);
2594 if (ret)
2595 goto err;
2596
2597 data->flags |= OPP_CONFIG_CLK;
2598 } else if (config->config_clks) {
2599 /* Don't allow config callback without clocks */
2600 ret = -EINVAL;
2601 goto err;
2602 }
2603
2604 /* Configure property names */
2605 if (config->prop_name) {
2606 ret = _opp_set_prop_name(opp_table, config->prop_name);
2607 if (ret)
2608 goto err;
2609
2610 data->flags |= OPP_CONFIG_PROP_NAME;
2611 }
2612
2613 /* Configure config_regulators helper */
2614 if (config->config_regulators) {
2615 ret = _opp_set_config_regulators_helper(opp_table, dev,
2616 config->config_regulators);
2617 if (ret)
2618 goto err;
2619
2620 data->flags |= OPP_CONFIG_REGULATOR_HELPER;
2621 }
2622
2623 /* Configure supported hardware */
2624 if (config->supported_hw) {
2625 ret = _opp_set_supported_hw(opp_table, config->supported_hw,
2626 config->supported_hw_count);
2627 if (ret)
2628 goto err;
2629
2630 data->flags |= OPP_CONFIG_SUPPORTED_HW;
2631 }
2632
2633 /* Configure supplies */
2634 if (config->regulator_names) {
2635 ret = _opp_set_regulators(opp_table, dev,
2636 config->regulator_names);
2637 if (ret)
2638 goto err;
2639
2640 data->flags |= OPP_CONFIG_REGULATOR;
2641 }
2642
2643 /* Attach genpds */
2644 if (config->genpd_names) {
2645 ret = _opp_attach_genpd(opp_table, dev, config->genpd_names,
2646 config->virt_devs);
2647 if (ret)
2648 goto err;
2649
2650 data->flags |= OPP_CONFIG_GENPD;
2651 }
2652
2653 ret = xa_alloc(&opp_configs, &id, data, XA_LIMIT(1, INT_MAX),
2654 GFP_KERNEL);
2655 if (ret)
2656 goto err;
2657
2658 return id;
2659
2660 err:
2661 _opp_clear_config(data);
2662 return ret;
2663 }
2664 EXPORT_SYMBOL_GPL(dev_pm_opp_set_config);
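/*
 * Illustrative sketch ("vdd" is a hypothetical supply name): a consumer
 * driver configures the OPP table once, before adding any OPPs, and keeps
 * the returned token to undo the configuration later:
 *
 *	struct dev_pm_opp_config config = {
 *		.regulator_names = (const char *[]){ "vdd", NULL },
 *	};
 *	int token;
 *
 *	token = dev_pm_opp_set_config(dev, &config);
 *	if (token < 0)
 *		return token;
 *	...
 *	dev_pm_opp_clear_config(token);
 */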
2665
2666 /**
2667 * dev_pm_opp_clear_config() - Releases resources blocked for OPP configuration.
2668 * @token: The token returned by dev_pm_opp_set_config().
2669 *
2670 * This allows all device OPP configurations to be cleared at once. This must be
2671 * called once for each call made to dev_pm_opp_set_config(), in order to free
2672 * the OPPs properly.
2673 *
2674 * Currently the first call itself ends up freeing all the OPP configurations,
2675 * while the later ones only drop the OPP table reference. This works well for
2676 * now, as we would never want to use a half-initialized OPP table and want to
2677 * remove the configurations together.
2678 */
2679 void dev_pm_opp_clear_config(int token)
2680 {
2681 struct opp_config_data *data;
2682
2683 /*
2684 * This lets the callers call this unconditionally and keep their code
2685 * simple.
2686 */
2687 if (unlikely(token <= 0))
2688 return;
2689
2690 data = xa_erase(&opp_configs, token);
2691 if (WARN_ON(!data))
2692 return;
2693
2694 _opp_clear_config(data);
2695 }
2696 EXPORT_SYMBOL_GPL(dev_pm_opp_clear_config);
2697
2698 static void devm_pm_opp_config_release(void *token)
2699 {
2700 dev_pm_opp_clear_config((unsigned long)token);
2701 }
2702
2703 /**
2704 * devm_pm_opp_set_config() - Set OPP configuration for the device.
2705 * @dev: Device for which configuration is being set.
2706 * @config: OPP configuration.
2707 *
2708 * This allows all device OPP configurations to be performed at once.
2709 * This is a resource-managed variant of dev_pm_opp_set_config().
2710 *
2711 * Return: 0 on success and a negative errno otherwise.
2712 */
2713 int devm_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
2714 {
2715 int token = dev_pm_opp_set_config(dev, config);
2716
2717 if (token < 0)
2718 return token;
2719
2720 return devm_add_action_or_reset(dev, devm_pm_opp_config_release,
2721 (void *) ((unsigned long) token));
2722 }
2723 EXPORT_SYMBOL_GPL(devm_pm_opp_set_config);
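/*
 * Illustrative probe-time sketch (foo_probe and "vdd" are hypothetical):
 * the devm variant ties the configuration lifetime to the device, so no
 * explicit dev_pm_opp_clear_config() call is needed:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct dev_pm_opp_config config = {
 *			.regulator_names = (const char *[]){ "vdd", NULL },
 *		};
 *
 *		return devm_pm_opp_set_config(&pdev->dev, &config);
 *	}
 */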
2724
2725 /**
2726 * dev_pm_opp_xlate_required_opp() - Find required OPP for @src_table OPP.
2727 * @src_table: OPP table which has @dst_table as one of its required OPP table.
2728 * @dst_table: Required OPP table of the @src_table.
2729 * @src_opp: OPP from the @src_table.
2730 *
2731 * This function returns the OPP (present in @dst_table) pointed out by the
2732 * "required-opps" property of the @src_opp (present in @src_table).
2733 *
2734 * The callers are required to call dev_pm_opp_put() for the returned OPP after
2735 * use.
2736 *
2737 * Return: pointer to 'struct dev_pm_opp' on success, or an ERR_PTR encoded errno otherwise.
2738 */
2739 struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table,
2740 struct opp_table *dst_table,
2741 struct dev_pm_opp *src_opp)
2742 {
2743 struct dev_pm_opp *opp, *dest_opp = ERR_PTR(-ENODEV);
2744 int i;
2745
2746 if (!src_table || !dst_table || !src_opp ||
2747 !src_table->required_opp_tables)
2748 return ERR_PTR(-EINVAL);
2749
2750 /* required-opps not fully initialized yet */
2751 if (lazy_linking_pending(src_table))
2752 return ERR_PTR(-EBUSY);
2753
2754 for (i = 0; i < src_table->required_opp_count; i++) {
2755 if (src_table->required_opp_tables[i] == dst_table) {
2756 mutex_lock(&src_table->lock);
2757
2758 list_for_each_entry(opp, &src_table->opp_list, node) {
2759 if (opp == src_opp) {
2760 dest_opp = opp->required_opps[i];
2761 dev_pm_opp_get(dest_opp);
2762 break;
2763 }
2764 }
2765
2766 mutex_unlock(&src_table->lock);
2767 break;
2768 }
2769 }
2770
2771 if (IS_ERR(dest_opp)) {
2772 pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__,
2773 src_table, dst_table);
2774 }
2775
2776 return dest_opp;
2777 }
2778 EXPORT_SYMBOL_GPL(dev_pm_opp_xlate_required_opp);
2779
2780 /**
2781 * dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table.
2782 * @src_table: OPP table which has dst_table as one of its required OPP table.
2783 * @dst_table: Required OPP table of the src_table.
2784 * @pstate: Current performance state of the src_table.
2785 *
2786 * This returns the pstate of the OPP (present in @dst_table) pointed out by the
2787 * "required-opps" property of the OPP (present in @src_table) which has
2788 * performance state set to @pstate.
2789 *
2790 * Return: Zero or positive performance state on success, otherwise negative
2791 * value on errors.
2792 */
2793 int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
2794 struct opp_table *dst_table,
2795 unsigned int pstate)
2796 {
2797 struct dev_pm_opp *opp;
2798 int dest_pstate = -EINVAL;
2799 int i;
2800
2801 /*
2802 * Normally the src_table will have the "required_opps" property set to
2803 * point to one of the OPPs in the dst_table, but in some cases the
2804 * genpd and its master have a one-to-one mapping of performance states
2805 * and so none of them have the "required-opps" property set. Return the
2806 * pstate of the src_table as it is in such cases.
2807 */
2808 if (!src_table || !src_table->required_opp_count)
2809 return pstate;
2810
2811 /* Both OPP tables must belong to genpds */
2812 if (unlikely(!src_table->is_genpd || !dst_table->is_genpd)) {
2813 pr_err("%s: Performance state is only valid for genpds.\n", __func__);
2814 return -EINVAL;
2815 }
2816
2817 /* required-opps not fully initialized yet */
2818 if (lazy_linking_pending(src_table))
2819 return -EBUSY;
2820
2821 for (i = 0; i < src_table->required_opp_count; i++) {
2822 if (src_table->required_opp_tables[i]->np == dst_table->np)
2823 break;
2824 }
2825
2826 if (unlikely(i == src_table->required_opp_count)) {
2827 pr_err("%s: Couldn't find matching OPP table (%p: %p)\n",
2828 __func__, src_table, dst_table);
2829 return -EINVAL;
2830 }
2831
2832 mutex_lock(&src_table->lock);
2833
2834 list_for_each_entry(opp, &src_table->opp_list, node) {
2835 if (opp->level == pstate) {
2836 dest_pstate = opp->required_opps[i]->level;
2837 goto unlock;
2838 }
2839 }
2840
2841 pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, src_table,
2842 dst_table);
2843
2844 unlock:
2845 mutex_unlock(&src_table->lock);
2846
2847 return dest_pstate;
2848 }
2849
2850 /**
2851 * dev_pm_opp_add() - Add an OPP to the OPP table for the device
2852 * @dev: device for which we do this operation
2853 * @freq: Frequency in Hz for this OPP
2854 * @u_volt: Voltage in uVolts for this OPP
2855 *
2856 * This function adds an opp definition to the opp table and returns status.
2857 * The opp is made available by default and it can be controlled using
2858 * dev_pm_opp_enable/disable functions.
2859 *
2860 * Return:
2861 * 0 On success OR
2862 * Duplicate OPPs (both freq and volt are same) and opp->available
2863 * -EEXIST Freq are same and volt are different OR
2864 * Duplicate OPPs (both freq and volt are same) and !opp->available
2865 * -ENOMEM Memory allocation failure
2866 */
2867 int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
2868 {
2869 struct opp_table *opp_table;
2870 int ret;
2871
2872 opp_table = _add_opp_table(dev, true);
2873 if (IS_ERR(opp_table))
2874 return PTR_ERR(opp_table);
2875
2876 /* Fix regulator count for dynamic OPPs */
2877 opp_table->regulator_count = 1;
2878
2879 ret = _opp_add_v1(opp_table, dev, freq, u_volt, true);
2880 if (ret)
2881 dev_pm_opp_put_opp_table(opp_table);
2882
2883 return ret;
2884 }
2885 EXPORT_SYMBOL_GPL(dev_pm_opp_add);
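/*
 * Illustrative sketch (frequencies and voltages are arbitrary): building a
 * small dynamic OPP table for a device without static DT entries:
 *
 *	dev_pm_opp_add(dev, 600000000, 900000);
 *	dev_pm_opp_add(dev, 800000000, 1000000);
 *	dev_pm_opp_add(dev, 1000000000, 1100000);
 */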
2886
2887 /**
2888 * _opp_set_availability() - helper to set the availability of an opp
2889 * @dev: device for which we do this operation
2890 * @freq: OPP frequency to modify availability
2891 * @availability_req: availability status requested for this opp
2892 *
2893 * Set the availability of an OPP; dev_pm_opp_{enable,disable}() share this
2894 * common logic, which is isolated here.
2895 *
2896 * Return: -EINVAL for bad pointers, -ENOMEM if no memory is available for the
2897 * copy operation, and 0 if no modification was needed or the modification was
2898 * successful.
2899 */
2900 static int _opp_set_availability(struct device *dev, unsigned long freq,
2901 bool availability_req)
2902 {
2903 struct opp_table *opp_table;
2904 struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
2905 int r = 0;
2906
2907 /* Find the opp_table */
2908 opp_table = _find_opp_table(dev);
2909 if (IS_ERR(opp_table)) {
2910 r = PTR_ERR(opp_table);
2911 dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
2912 return r;
2913 }
2914
2915 if (!assert_single_clk(opp_table, 0)) {
2916 r = -EINVAL;
2917 goto put_table;
2918 }
2919
2920 mutex_lock(&opp_table->lock);
2921
2922 /* Do we have the frequency? */
2923 list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
2924 if (tmp_opp->rates[0] == freq) {
2925 opp = tmp_opp;
2926 break;
2927 }
2928 }
2929
2930 if (IS_ERR(opp)) {
2931 r = PTR_ERR(opp);
2932 goto unlock;
2933 }
2934
2935 /* Is update really needed? */
2936 if (opp->available == availability_req)
2937 goto unlock;
2938
2939 opp->available = availability_req;
2940
2941 dev_pm_opp_get(opp);
2942 mutex_unlock(&opp_table->lock);
2943
2944 /* Notify the change of the OPP availability */
2945 if (availability_req)
2946 blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
2947 opp);
2948 else
2949 blocking_notifier_call_chain(&opp_table->head,
2950 OPP_EVENT_DISABLE, opp);
2951
2952 dev_pm_opp_put(opp);
2953 goto put_table;
2954
2955 unlock:
2956 mutex_unlock(&opp_table->lock);
2957 put_table:
2958 dev_pm_opp_put_opp_table(opp_table);
2959 return r;
2960 }
2961
2962 /**
2963 * dev_pm_opp_adjust_voltage() - helper to change the voltage of an OPP
2964 * @dev: device for which we do this operation
2965 * @freq: OPP frequency to adjust voltage of
2966 * @u_volt: new OPP target voltage
2967 * @u_volt_min: new OPP min voltage
2968 * @u_volt_max: new OPP max voltage
2969 *
2970 * Return: -EINVAL for bad pointers, -ENOMEM if no memory is available for the
2971 * copy operation, and 0 if no modification was needed or the modification was
2972 * successful.
2973 */
2974 int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
2975 unsigned long u_volt, unsigned long u_volt_min,
2976 unsigned long u_volt_max)
2978 {
2979 struct opp_table *opp_table;
2980 struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
2981 int r = 0;
2982
2983 /* Find the opp_table */
2984 opp_table = _find_opp_table(dev);
2985 if (IS_ERR(opp_table)) {
2986 r = PTR_ERR(opp_table);
2987 dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
2988 return r;
2989 }
2990
2991 if (!assert_single_clk(opp_table, 0)) {
2992 r = -EINVAL;
2993 goto put_table;
2994 }
2995
2996 mutex_lock(&opp_table->lock);
2997
2998 /* Do we have the frequency? */
2999 list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
3000 if (tmp_opp->rates[0] == freq) {
3001 opp = tmp_opp;
3002 break;
3003 }
3004 }
3005
3006 if (IS_ERR(opp)) {
3007 r = PTR_ERR(opp);
3008 goto adjust_unlock;
3009 }
3010
3011 /* Is update really needed? */
3012 if (opp->supplies->u_volt == u_volt)
3013 goto adjust_unlock;
3014
3015 opp->supplies->u_volt = u_volt;
3016 opp->supplies->u_volt_min = u_volt_min;
3017 opp->supplies->u_volt_max = u_volt_max;
3018
3019 dev_pm_opp_get(opp);
3020 mutex_unlock(&opp_table->lock);
3021
3022 /* Notify the voltage change of the OPP */
3023 blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADJUST_VOLTAGE,
3024 opp);
3025
3026 dev_pm_opp_put(opp);
3027 goto put_table;
3028
3029 adjust_unlock:
3030 mutex_unlock(&opp_table->lock);
3031 put_table:
3032 dev_pm_opp_put_opp_table(opp_table);
3033 return r;
3034 }
3035 EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage);
3036
3037 /**
3038 * dev_pm_opp_enable() - Enable a specific OPP
3039 * @dev: device for which we do this operation
3040 * @freq: OPP frequency to enable
3041 *
3042 * Enables a provided opp. If the operation is valid, this returns 0, else the
3043 * corresponding error value. It is meant to be used to make an OPP available
3044 * again after it was temporarily made unavailable with dev_pm_opp_disable.
3045 *
3046 * Return: -EINVAL for bad pointers, -ENOMEM if no memory is available for the
3047 * copy operation, and 0 if no modification was needed or the modification was
3048 * successful.
3049 */
3050 int dev_pm_opp_enable(struct device *dev, unsigned long freq)
3051 {
3052 return _opp_set_availability(dev, freq, true);
3053 }
3054 EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
3055
3056 /**
3057 * dev_pm_opp_disable() - Disable a specific OPP
3058 * @dev: device for which we do this operation
3059 * @freq: OPP frequency to disable
3060 *
3061 * Disables a provided opp. If the operation is valid, this returns
3062 * 0, else the corresponding error value. It is meant to be a temporary
3063 * control by users to make this OPP not available until the circumstances are
3064 * right to make it available again (with a call to dev_pm_opp_enable).
3065 *
3066 * Return: -EINVAL for bad pointers, -ENOMEM if no memory is available for the
3067 * copy operation, and 0 if no modification was needed or the modification was
3068 * successful.
3069 */
3070 int dev_pm_opp_disable(struct device *dev, unsigned long freq)
3071 {
3072 return _opp_set_availability(dev, freq, false);
3073 }
3074 EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
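/*
 * Illustrative sketch: temporarily masking an OPP, e.g. while thermally
 * constrained, and restoring it later (the frequency is arbitrary):
 *
 *	dev_pm_opp_disable(dev, 1000000000);
 *	...
 *	dev_pm_opp_enable(dev, 1000000000);
 */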
3075
3076 /**
3077 * dev_pm_opp_register_notifier() - Register OPP notifier for the device
3078 * @dev: Device for which notifier needs to be registered
3079 * @nb: Notifier block to be registered
3080 *
3081 * Return: 0 on success or a negative error value.
3082 */
3083 int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
3084 {
3085 struct opp_table *opp_table;
3086 int ret;
3087
3088 opp_table = _find_opp_table(dev);
3089 if (IS_ERR(opp_table))
3090 return PTR_ERR(opp_table);
3091
3092 ret = blocking_notifier_chain_register(&opp_table->head, nb);
3093
3094 dev_pm_opp_put_opp_table(opp_table);
3095
3096 return ret;
3097 }
3098 EXPORT_SYMBOL(dev_pm_opp_register_notifier);
3099
3100 /**
3101 * dev_pm_opp_unregister_notifier() - Unregister OPP notifier for the device
3102 * @dev: Device for which notifier needs to be unregistered
3103 * @nb: Notifier block to be unregistered
3104 *
3105 * Return: 0 on success or a negative error value.
3106 */
3107 int dev_pm_opp_unregister_notifier(struct device *dev,
3108 struct notifier_block *nb)
3109 {
3110 struct opp_table *opp_table;
3111 int ret;
3112
3113 opp_table = _find_opp_table(dev);
3114 if (IS_ERR(opp_table))
3115 return PTR_ERR(opp_table);
3116
3117 ret = blocking_notifier_chain_unregister(&opp_table->head, nb);
3118
3119 dev_pm_opp_put_opp_table(opp_table);
3120
3121 return ret;
3122 }
3123 EXPORT_SYMBOL(dev_pm_opp_unregister_notifier);
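/*
 * Illustrative sketch (foo_opp_notify and foo_update_limits are hypothetical):
 * a consumer interested in OPP list changes registers a notifier block; the
 * events delivered are the OPP_EVENT_* values used by the notifier calls
 * above, with the affected OPP passed as the data pointer:
 *
 *	static int foo_opp_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct dev_pm_opp *opp = data;
 *
 *		if (event == OPP_EVENT_ADD || event == OPP_EVENT_REMOVE)
 *			foo_update_limits(opp);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_opp_notify,
 *	};
 *
 *	dev_pm_opp_register_notifier(dev, &foo_nb);
 */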
3124
3125 /**
3126 * dev_pm_opp_remove_table() - Free all OPPs associated with the device
3127 * @dev: device pointer used to lookup OPP table.
3128 *
3129 * Free both OPPs created using static entries present in DT and the
3130 * dynamically added entries.
3131 */
3132 void dev_pm_opp_remove_table(struct device *dev)
3133 {
3134 struct opp_table *opp_table;
3135
3136 /* Check for existing table for 'dev' */
3137 opp_table = _find_opp_table(dev);
3138 if (IS_ERR(opp_table)) {
3139 int error = PTR_ERR(opp_table);
3140
3141 if (error != -ENODEV)
3142 WARN(1, "%s: opp_table: %d\n",
3143 IS_ERR_OR_NULL(dev) ?
3144 "Invalid device" : dev_name(dev),
3145 error);
3146 return;
3147 }
3148
3149 /*
3150 * Drop the extra reference only if the OPP table was successfully added
3151 * with dev_pm_opp_of_add_table() earlier.
3152 */
3153 if (_opp_remove_all_static(opp_table))
3154 dev_pm_opp_put_opp_table(opp_table);
3155
3156 /* Drop reference taken by _find_opp_table() */
3157 dev_pm_opp_put_opp_table(opp_table);
3158 }
3159 EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
3160
3161 /**
3162 * dev_pm_opp_sync_regulators() - Sync state of voltage regulators
3163 * @dev: device for which we do this operation
3164 *
3165 * Sync voltage state of the OPP table regulators.
3166 *
3167 * Return: 0 on success or a negative error value.
3168 */
3169 int dev_pm_opp_sync_regulators(struct device *dev)
3170 {
3171 struct opp_table *opp_table;
3172 struct regulator *reg;
3173 int i, ret = 0;
3174
3175 /* Device may not have OPP table */
3176 opp_table = _find_opp_table(dev);
3177 if (IS_ERR(opp_table))
3178 return 0;
3179
3180 /* Regulator may not be required for the device */
3181 if (unlikely(!opp_table->regulators))
3182 goto put_table;
3183
3184 /* Nothing to sync if voltage wasn't changed */
3185 if (!opp_table->enabled)
3186 goto put_table;
3187
3188 for (i = 0; i < opp_table->regulator_count; i++) {
3189 reg = opp_table->regulators[i];
3190 ret = regulator_sync_voltage(reg);
3191 if (ret)
3192 break;
3193 }
3194 put_table:
3195 /* Drop reference taken by _find_opp_table() */
3196 dev_pm_opp_put_opp_table(opp_table);
3197
3198 return ret;
3199 }
3200 EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators);
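/*
 * Illustrative sketch (foo_resume is hypothetical): drivers typically call
 * this from their resume path, after the regulators may have been reset, to
 * bring the supply voltages back in line with the current OPP:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return dev_pm_opp_sync_regulators(dev);
 *	}
 */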
3201