// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/mfd/qcom_rpm.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include <dt-bindings/mfd/qcom-rpm.h>
#include <dt-bindings/clock/qcom,rpmcc.h>

#define QCOM_RPM_MISC_CLK_TYPE		0x306b6c63
#define QCOM_RPM_SCALING_ENABLE_ID	0x2
#define QCOM_RPM_XO_MODE_ON		0x2

static const struct clk_parent_data gcc_pxo[] = {
	{ .fw_name = "pxo", .name = "pxo_board" },
};

static const struct clk_parent_data gcc_cxo[] = {
	{ .fw_name = "cxo", .name = "cxo_board" },
};

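/*
 * DEFINE_CLK_RPM() instantiates a pair of clocks for one RPM resource: a
 * normal clock ("<name>_clk") and an active-only clock ("<name>_a_clk").
 * The two reference each other through ->peer so that prepare/set_rate can
 * aggregate both votes before talking to the RPM; the active-only variant
 * votes 0 for the sleep set.
 */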
#define DEFINE_CLK_RPM(_name, r_id) \
	static struct clk_rpm clk_rpm_##_name##_a_clk; \
	static struct clk_rpm clk_rpm_##_name##_clk = { \
		.rpm_clk_id = (r_id), \
		.peer = &clk_rpm_##_name##_a_clk, \
		.rate = INT_MAX, \
		.hw.init = &(struct clk_init_data){ \
			.ops = &clk_rpm_ops, \
			.name = #_name "_clk", \
			.parent_data = gcc_pxo, \
			.num_parents = ARRAY_SIZE(gcc_pxo), \
		}, \
	}; \
	static struct clk_rpm clk_rpm_##_name##_a_clk = { \
		.rpm_clk_id = (r_id), \
		.peer = &clk_rpm_##_name##_clk, \
		.active_only = true, \
		.rate = INT_MAX, \
		.hw.init = &(struct clk_init_data){ \
			.ops = &clk_rpm_ops, \
			.name = #_name "_a_clk", \
			.parent_data = gcc_pxo, \
			.num_parents = ARRAY_SIZE(gcc_pxo), \
		}, \
	}

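/*
 * All XO buffer clocks share the single QCOM_RPM_CXO_BUFFERS resource.
 * Each buffer is switched on or off by writing QCOM_RPM_XO_MODE_ON shifted
 * by the buffer's xo_offset into that shared word (see clk_rpm_xo_prepare()).
 */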
#define DEFINE_CLK_RPM_XO_BUFFER(_name, offset) \
	static struct clk_rpm clk_rpm_##_name##_clk = { \
		.rpm_clk_id = QCOM_RPM_CXO_BUFFERS, \
		.xo_offset = (offset), \
		.hw.init = &(struct clk_init_data){ \
			.ops = &clk_rpm_xo_ops, \
			.name = #_name "_clk", \
			.parent_data = gcc_cxo, \
			.num_parents = ARRAY_SIZE(gcc_cxo), \
		}, \
	}

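/*
 * Fixed-rate RPM clocks (such as PLL4) cannot be scaled; the RPM is only
 * told to turn them on or off, and the rate reported back is the constant
 * passed in here.
 */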
#define DEFINE_CLK_RPM_FIXED(_name, r_id, r) \
	static struct clk_rpm clk_rpm_##_name##_clk = { \
		.rpm_clk_id = (r_id), \
		.rate = (r), \
		.hw.init = &(struct clk_init_data){ \
			.ops = &clk_rpm_fixed_ops, \
			.name = #_name "_clk", \
			.parent_data = gcc_pxo, \
			.num_parents = ARRAY_SIZE(gcc_pxo), \
		}, \
	}

#define to_clk_rpm(_hw) container_of(_hw, struct clk_rpm, hw)

struct rpm_cc;

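/*
 * struct clk_rpm - one RPM-controlled clock
 * @rpm_clk_id:	 RPM resource id this clock votes on
 * @xo_offset:	 bit offset within the shared CXO buffer word (XO clocks only)
 * @active_only: true for the "_a" variant that only votes for the active set
 * @rate:	 last requested rate, in Hz
 * @enabled:	 true once a prepare vote has been sent
 * @branch:	 clock is an on/off branch, so votes are clamped to 0/1
 * @peer:	 the other clock in the normal/active-only pair
 */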
struct clk_rpm {
	const int rpm_clk_id;
	const int xo_offset;
	const bool active_only;
	unsigned long rate;
	bool enabled;
	bool branch;
	struct clk_rpm *peer;
	struct clk_hw hw;
	struct qcom_rpm *rpm;
	struct rpm_cc *rpm_cc;
};

struct rpm_cc {
	struct qcom_rpm *rpm;
	struct clk_rpm **clks;
	size_t num_clks;
	u32 xo_buffer_value;
	struct mutex xo_lock;
};

struct rpm_clk_desc {
	struct clk_rpm **clks;
	size_t num_clks;
};

static DEFINE_MUTEX(rpm_clk_lock);

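/*
 * At probe time every clock is handed off by voting INT_MAX for both the
 * active and sleep sets, so the RPM keeps the resource at its maximum rate
 * until a consumer makes an explicit request.
 */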
static int clk_rpm_handoff(struct clk_rpm *r)
{
	int ret;
	u32 value = INT_MAX;

	/*
	 * The vendor tree simply reads the status for this
	 * RPM clock.
	 */
	if (r->rpm_clk_id == QCOM_RPM_PLL_4 ||
	    r->rpm_clk_id == QCOM_RPM_CXO_BUFFERS)
		return 0;

	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
			     r->rpm_clk_id, &value, 1);
	if (ret)
		return ret;
	ret = qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE,
			     r->rpm_clk_id, &value, 1);
	if (ret)
		return ret;

	return 0;
}

static int clk_rpm_set_rate_active(struct clk_rpm *r, unsigned long rate)
{
	u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */

	return qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
			      r->rpm_clk_id, &value, 1);
}

static int clk_rpm_set_rate_sleep(struct clk_rpm *r, unsigned long rate)
{
	u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */

	return qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE,
			      r->rpm_clk_id, &value, 1);
}

static void to_active_sleep(struct clk_rpm *r, unsigned long rate,
			    unsigned long *active, unsigned long *sleep)
{
	*active = rate;

	/*
	 * Active-only clocks don't care what the rate is during sleep. So,
	 * they vote for zero.
	 */
	if (r->active_only)
		*sleep = 0;
	else
		*sleep = *active;
}

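/*
 * Prepare/unprepare and set_rate all follow the same pattern: combine this
 * clock's request with its peer's (taking the max of the two rates for each
 * of the active and sleep sets) and send the aggregated votes to the RPM.
 */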
static int clk_rpm_prepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct clk_rpm *peer = r->peer;
	unsigned long this_rate = 0, this_sleep_rate = 0;
	unsigned long peer_rate = 0, peer_sleep_rate = 0;
	unsigned long active_rate, sleep_rate;
	int ret = 0;

	mutex_lock(&rpm_clk_lock);

	/* Don't send requests to the RPM if the rate has not been set. */
	if (!r->rate)
		goto out;

	to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate);

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep(peer, peer->rate,
				&peer_rate, &peer_sleep_rate);

	active_rate = max(this_rate, peer_rate);

	if (r->branch)
		active_rate = !!active_rate;

	ret = clk_rpm_set_rate_active(r, active_rate);
	if (ret)
		goto out;

	sleep_rate = max(this_sleep_rate, peer_sleep_rate);
	if (r->branch)
		sleep_rate = !!sleep_rate;

	ret = clk_rpm_set_rate_sleep(r, sleep_rate);
	if (ret)
		/* Undo the active-set vote by restoring the peer's rate */
		ret = clk_rpm_set_rate_active(r, peer_rate);

out:
	if (!ret)
		r->enabled = true;

	mutex_unlock(&rpm_clk_lock);

	return ret;
}

static void clk_rpm_unprepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct clk_rpm *peer = r->peer;
	unsigned long peer_rate = 0, peer_sleep_rate = 0;
	unsigned long active_rate, sleep_rate;
	int ret;

	mutex_lock(&rpm_clk_lock);

	if (!r->rate)
		goto out;

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep(peer, peer->rate, &peer_rate,
				&peer_sleep_rate);

	active_rate = r->branch ? !!peer_rate : peer_rate;
	ret = clk_rpm_set_rate_active(r, active_rate);
	if (ret)
		goto out;

	sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate;
	ret = clk_rpm_set_rate_sleep(r, sleep_rate);
	if (ret)
		goto out;

	r->enabled = false;

out:
	mutex_unlock(&rpm_clk_lock);
}

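/*
 * XO buffer clocks all write to the same RPM word; the cached copy of that
 * word (rcc->xo_buffer_value) is updated under xo_lock so that enabling one
 * buffer does not clobber the mode bits of the others.
 */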
static int clk_rpm_xo_prepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct rpm_cc *rcc = r->rpm_cc;
	int ret, clk_id = r->rpm_clk_id;
	u32 value;

	mutex_lock(&rcc->xo_lock);

	value = rcc->xo_buffer_value | (QCOM_RPM_XO_MODE_ON << r->xo_offset);
	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE, clk_id, &value, 1);
	if (!ret) {
		r->enabled = true;
		rcc->xo_buffer_value = value;
	}

	mutex_unlock(&rcc->xo_lock);

	return ret;
}

static void clk_rpm_xo_unprepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct rpm_cc *rcc = r->rpm_cc;
	int ret, clk_id = r->rpm_clk_id;
	u32 value;

	mutex_lock(&rcc->xo_lock);

	value = rcc->xo_buffer_value & ~(QCOM_RPM_XO_MODE_ON << r->xo_offset);
	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE, clk_id, &value, 1);
	if (!ret) {
		r->enabled = false;
		rcc->xo_buffer_value = value;
	}

	mutex_unlock(&rcc->xo_lock);
}

static int clk_rpm_fixed_prepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	u32 value = 1;
	int ret;

	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
			     r->rpm_clk_id, &value, 1);
	if (!ret)
		r->enabled = true;

	return ret;
}

static void clk_rpm_fixed_unprepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	u32 value = 0;
	int ret;

	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
			     r->rpm_clk_id, &value, 1);
	if (!ret)
		r->enabled = false;
}

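/*
 * set_rate only talks to the RPM (and records the new rate) while the clock
 * is prepared; on an unprepared clock the request is a no-op here.
 */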
static int clk_rpm_set_rate(struct clk_hw *hw,
			    unsigned long rate, unsigned long parent_rate)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct clk_rpm *peer = r->peer;
	unsigned long active_rate, sleep_rate;
	unsigned long this_rate = 0, this_sleep_rate = 0;
	unsigned long peer_rate = 0, peer_sleep_rate = 0;
	int ret = 0;

	mutex_lock(&rpm_clk_lock);

	if (!r->enabled)
		goto out;

	to_active_sleep(r, rate, &this_rate, &this_sleep_rate);

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep(peer, peer->rate,
				&peer_rate, &peer_sleep_rate);

	active_rate = max(this_rate, peer_rate);
	ret = clk_rpm_set_rate_active(r, active_rate);
	if (ret)
		goto out;

	sleep_rate = max(this_sleep_rate, peer_sleep_rate);
	ret = clk_rpm_set_rate_sleep(r, sleep_rate);
	if (ret)
		goto out;

	r->rate = rate;

out:
	mutex_unlock(&rpm_clk_lock);

	return ret;
}

static long clk_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *parent_rate)
{
	/*
	 * RPM handles rate rounding and we don't have a way to
	 * know what the rate will be, so just return whatever
	 * rate is requested.
	 */
	return rate;
}

static unsigned long clk_rpm_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	struct clk_rpm *r = to_clk_rpm(hw);

	/*
	 * RPM handles rate rounding and we don't have a way to
	 * know what the rate will be, so just return whatever
	 * rate was set.
	 */
	return r->rate;
}

static const struct clk_ops clk_rpm_xo_ops = {
	.prepare = clk_rpm_xo_prepare,
	.unprepare = clk_rpm_xo_unprepare,
};

static const struct clk_ops clk_rpm_fixed_ops = {
	.prepare = clk_rpm_fixed_prepare,
	.unprepare = clk_rpm_fixed_unprepare,
	.round_rate = clk_rpm_round_rate,
	.recalc_rate = clk_rpm_recalc_rate,
};

static const struct clk_ops clk_rpm_ops = {
	.prepare = clk_rpm_prepare,
	.unprepare = clk_rpm_unprepare,
	.set_rate = clk_rpm_set_rate,
	.round_rate = clk_rpm_round_rate,
	.recalc_rate = clk_rpm_recalc_rate,
};

DEFINE_CLK_RPM(afab, QCOM_RPM_APPS_FABRIC_CLK);
DEFINE_CLK_RPM(sfab, QCOM_RPM_SYS_FABRIC_CLK);
DEFINE_CLK_RPM(mmfab, QCOM_RPM_MM_FABRIC_CLK);
DEFINE_CLK_RPM(daytona, QCOM_RPM_DAYTONA_FABRIC_CLK);
DEFINE_CLK_RPM(sfpb, QCOM_RPM_SFPB_CLK);
DEFINE_CLK_RPM(cfpb, QCOM_RPM_CFPB_CLK);
DEFINE_CLK_RPM(mmfpb, QCOM_RPM_MMFPB_CLK);
DEFINE_CLK_RPM(smi, QCOM_RPM_SMI_CLK);
DEFINE_CLK_RPM(ebi1, QCOM_RPM_EBI1_CLK);

DEFINE_CLK_RPM(qdss, QCOM_RPM_QDSS_CLK);
DEFINE_CLK_RPM(nss_fabric_0, QCOM_RPM_NSS_FABRIC_0_CLK);
DEFINE_CLK_RPM(nss_fabric_1, QCOM_RPM_NSS_FABRIC_1_CLK);

DEFINE_CLK_RPM_FIXED(pll4, QCOM_RPM_PLL_4, 540672000);

DEFINE_CLK_RPM_XO_BUFFER(xo_d0, 0);
DEFINE_CLK_RPM_XO_BUFFER(xo_d1, 8);
DEFINE_CLK_RPM_XO_BUFFER(xo_a0, 16);
DEFINE_CLK_RPM_XO_BUFFER(xo_a1, 24);
DEFINE_CLK_RPM_XO_BUFFER(xo_a2, 28);

static struct clk_rpm *msm8660_clks[] = {
	[RPM_APPS_FABRIC_CLK] = &clk_rpm_afab_clk,
	[RPM_APPS_FABRIC_A_CLK] = &clk_rpm_afab_a_clk,
	[RPM_SYS_FABRIC_CLK] = &clk_rpm_sfab_clk,
	[RPM_SYS_FABRIC_A_CLK] = &clk_rpm_sfab_a_clk,
	[RPM_MM_FABRIC_CLK] = &clk_rpm_mmfab_clk,
	[RPM_MM_FABRIC_A_CLK] = &clk_rpm_mmfab_a_clk,
	[RPM_DAYTONA_FABRIC_CLK] = &clk_rpm_daytona_clk,
	[RPM_DAYTONA_FABRIC_A_CLK] = &clk_rpm_daytona_a_clk,
	[RPM_SFPB_CLK] = &clk_rpm_sfpb_clk,
	[RPM_SFPB_A_CLK] = &clk_rpm_sfpb_a_clk,
	[RPM_CFPB_CLK] = &clk_rpm_cfpb_clk,
	[RPM_CFPB_A_CLK] = &clk_rpm_cfpb_a_clk,
	[RPM_MMFPB_CLK] = &clk_rpm_mmfpb_clk,
	[RPM_MMFPB_A_CLK] = &clk_rpm_mmfpb_a_clk,
	[RPM_SMI_CLK] = &clk_rpm_smi_clk,
	[RPM_SMI_A_CLK] = &clk_rpm_smi_a_clk,
	[RPM_EBI1_CLK] = &clk_rpm_ebi1_clk,
	[RPM_EBI1_A_CLK] = &clk_rpm_ebi1_a_clk,
	[RPM_PLL4_CLK] = &clk_rpm_pll4_clk,
};

static const struct rpm_clk_desc rpm_clk_msm8660 = {
	.clks = msm8660_clks,
	.num_clks = ARRAY_SIZE(msm8660_clks),
};

static struct clk_rpm *apq8064_clks[] = {
	[RPM_APPS_FABRIC_CLK] = &clk_rpm_afab_clk,
	[RPM_APPS_FABRIC_A_CLK] = &clk_rpm_afab_a_clk,
	[RPM_CFPB_CLK] = &clk_rpm_cfpb_clk,
	[RPM_CFPB_A_CLK] = &clk_rpm_cfpb_a_clk,
	[RPM_DAYTONA_FABRIC_CLK] = &clk_rpm_daytona_clk,
	[RPM_DAYTONA_FABRIC_A_CLK] = &clk_rpm_daytona_a_clk,
	[RPM_EBI1_CLK] = &clk_rpm_ebi1_clk,
	[RPM_EBI1_A_CLK] = &clk_rpm_ebi1_a_clk,
	[RPM_MM_FABRIC_CLK] = &clk_rpm_mmfab_clk,
	[RPM_MM_FABRIC_A_CLK] = &clk_rpm_mmfab_a_clk,
	[RPM_MMFPB_CLK] = &clk_rpm_mmfpb_clk,
	[RPM_MMFPB_A_CLK] = &clk_rpm_mmfpb_a_clk,
	[RPM_SYS_FABRIC_CLK] = &clk_rpm_sfab_clk,
	[RPM_SYS_FABRIC_A_CLK] = &clk_rpm_sfab_a_clk,
	[RPM_SFPB_CLK] = &clk_rpm_sfpb_clk,
	[RPM_SFPB_A_CLK] = &clk_rpm_sfpb_a_clk,
	[RPM_QDSS_CLK] = &clk_rpm_qdss_clk,
	[RPM_QDSS_A_CLK] = &clk_rpm_qdss_a_clk,
	[RPM_XO_D0] = &clk_rpm_xo_d0_clk,
	[RPM_XO_D1] = &clk_rpm_xo_d1_clk,
	[RPM_XO_A0] = &clk_rpm_xo_a0_clk,
	[RPM_XO_A1] = &clk_rpm_xo_a1_clk,
	[RPM_XO_A2] = &clk_rpm_xo_a2_clk,
};

static const struct rpm_clk_desc rpm_clk_apq8064 = {
	.clks = apq8064_clks,
	.num_clks = ARRAY_SIZE(apq8064_clks),
};

static struct clk_rpm *ipq806x_clks[] = {
	[RPM_APPS_FABRIC_CLK] = &clk_rpm_afab_clk,
	[RPM_APPS_FABRIC_A_CLK] = &clk_rpm_afab_a_clk,
	[RPM_CFPB_CLK] = &clk_rpm_cfpb_clk,
	[RPM_CFPB_A_CLK] = &clk_rpm_cfpb_a_clk,
	[RPM_DAYTONA_FABRIC_CLK] = &clk_rpm_daytona_clk,
	[RPM_DAYTONA_FABRIC_A_CLK] = &clk_rpm_daytona_a_clk,
	[RPM_EBI1_CLK] = &clk_rpm_ebi1_clk,
	[RPM_EBI1_A_CLK] = &clk_rpm_ebi1_a_clk,
	[RPM_SYS_FABRIC_CLK] = &clk_rpm_sfab_clk,
	[RPM_SYS_FABRIC_A_CLK] = &clk_rpm_sfab_a_clk,
	[RPM_SFPB_CLK] = &clk_rpm_sfpb_clk,
	[RPM_SFPB_A_CLK] = &clk_rpm_sfpb_a_clk,
	[RPM_NSS_FABRIC_0_CLK] = &clk_rpm_nss_fabric_0_clk,
	[RPM_NSS_FABRIC_0_A_CLK] = &clk_rpm_nss_fabric_0_a_clk,
	[RPM_NSS_FABRIC_1_CLK] = &clk_rpm_nss_fabric_1_clk,
	[RPM_NSS_FABRIC_1_A_CLK] = &clk_rpm_nss_fabric_1_a_clk,
};

static const struct rpm_clk_desc rpm_clk_ipq806x = {
	.clks = ipq806x_clks,
	.num_clks = ARRAY_SIZE(ipq806x_clks),
};

static const struct of_device_id rpm_clk_match_table[] = {
	{ .compatible = "qcom,rpmcc-msm8660", .data = &rpm_clk_msm8660 },
	{ .compatible = "qcom,rpmcc-apq8060", .data = &rpm_clk_msm8660 },
	{ .compatible = "qcom,rpmcc-apq8064", .data = &rpm_clk_apq8064 },
	{ .compatible = "qcom,rpmcc-ipq806x", .data = &rpm_clk_ipq806x },
	{ }
};
MODULE_DEVICE_TABLE(of, rpm_clk_match_table);

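/* of_clk provider callback: map a DT clock-specifier index onto a clk_hw. */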
static struct clk_hw *qcom_rpm_clk_hw_get(struct of_phandle_args *clkspec,
					   void *data)
{
	struct rpm_cc *rcc = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= rcc->num_clks) {
		pr_err("%s: invalid index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return rcc->clks[idx] ? &rcc->clks[idx]->hw : ERR_PTR(-ENOENT);
}

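/*
 * Probe: look up the RPM handle from the parent MFD device, hand off every
 * clock in the SoC's table, register the clk_hw instances and finally expose
 * them to DT consumers through the of_clk provider above.
 */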
static int rpm_clk_probe(struct platform_device *pdev)
{
	struct rpm_cc *rcc;
	int ret;
	size_t num_clks, i;
	struct qcom_rpm *rpm;
	struct clk_rpm **rpm_clks;
	const struct rpm_clk_desc *desc;

	rpm = dev_get_drvdata(pdev->dev.parent);
	if (!rpm) {
		dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
		return -ENODEV;
	}

	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -EINVAL;

	rpm_clks = desc->clks;
	num_clks = desc->num_clks;

	rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc), GFP_KERNEL);
	if (!rcc)
		return -ENOMEM;

	rcc->clks = rpm_clks;
	rcc->num_clks = num_clks;
	mutex_init(&rcc->xo_lock);

	for (i = 0; i < num_clks; i++) {
		if (!rpm_clks[i])
			continue;

		rpm_clks[i]->rpm = rpm;
		rpm_clks[i]->rpm_cc = rcc;

		ret = clk_rpm_handoff(rpm_clks[i]);
		if (ret)
			goto err;
	}

	for (i = 0; i < num_clks; i++) {
		if (!rpm_clks[i])
			continue;

		ret = devm_clk_hw_register(&pdev->dev, &rpm_clks[i]->hw);
		if (ret)
			goto err;
	}

	ret = devm_of_clk_add_hw_provider(&pdev->dev, qcom_rpm_clk_hw_get,
					  rcc);
	if (ret)
		goto err;

	return 0;
err:
	dev_err(&pdev->dev, "Error registering RPM Clock driver (%d)\n", ret);
	return ret;
}

static struct platform_driver rpm_clk_driver = {
	.driver = {
		.name = "qcom-clk-rpm",
		.of_match_table = rpm_clk_match_table,
	},
	.probe = rpm_clk_probe,
};

static int __init rpm_clk_init(void)
{
	return platform_driver_register(&rpm_clk_driver);
}
core_initcall(rpm_clk_init);

static void __exit rpm_clk_exit(void)
{
	platform_driver_unregister(&rpm_clk_driver);
}
module_exit(rpm_clk_exit);

MODULE_DESCRIPTION("Qualcomm RPM Clock Controller Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qcom-clk-rpm");