// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <soc/qcom/cmd-db.h>
#include <soc/qcom/rpmh.h>
#include <soc/qcom/tcs.h>

#include <dt-bindings/clock/qcom,rpmh.h>

#define CLK_RPMH_ARC_EN_OFFSET		0
#define CLK_RPMH_VRM_EN_OFFSET		4

/**
 * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager (BCM)
 * @unit: divisor used to convert Hz value to an RPMh msg
 * @width: multiplier used to convert Hz value to an RPMh msg
 * @vcd: virtual clock domain that this bcm belongs to
 * @reserved: reserved to pad the struct
 */
struct bcm_db {
	__le32 unit;
	__le16 width;
	u8 vcd;
	u8 reserved;
};
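
/*
 * Illustration (hypothetical numbers, not taken from cmd-db): if the aux
 * data for a BCM reports unit = 19200 (kHz), probe stores unit = 19200000 Hz.
 * A clk_set_rate() request of 76800000 Hz is then voted to RPMh as
 * 76800000 / 19200000 = 4 BCM units (see clk_rpmh_bcm_set_rate() below).
 */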

/**
 * struct clk_rpmh - individual rpmh clock data structure
 * @hw:			handle between common and hardware-specific interfaces
 * @res_name:		resource name for the rpmh clock
 * @div:		clock divider to compute the clock rate
 * @res_addr:		address of the rpmh resource (cmd-db address plus the
 *			ARC/VRM enable offset)
 * @res_on_val:		rpmh clock enable value
 * @state:		rpmh clock requested state
 * @aggr_state:		rpmh clock aggregated state
 * @last_sent_aggr_state: rpmh clock last aggr state sent to RPMh
 * @valid_state_mask:	mask to determine the state of the rpmh clock
 * @unit:		divisor to convert rate to an rpmh msg, stored in Hz
 *			(cmd-db reports it in kHz)
 * @dev:		device to which it is attached
 * @peer:		pointer to the clock rpmh sibling
 */
struct clk_rpmh {
	struct clk_hw hw;
	const char *res_name;
	u8 div;
	u32 res_addr;
	u32 res_on_val;
	u32 state;
	u32 aggr_state;
	u32 last_sent_aggr_state;
	u32 valid_state_mask;
	u32 unit;
	struct device *dev;
	struct clk_rpmh *peer;
};

struct clk_rpmh_desc {
	struct clk_hw **clks;
	size_t num_clks;
};

static DEFINE_MUTEX(rpmh_clk_lock);

#define __DEFINE_CLK_RPMH(_name, _clk_name, _res_name,			\
			  _res_en_offset, _res_on, _div)		\
	static struct clk_rpmh clk_rpmh_##_clk_name##_ao;		\
	static struct clk_rpmh clk_rpmh_##_clk_name = {			\
		.res_name = _res_name,					\
		.res_addr = _res_en_offset,				\
		.res_on_val = _res_on,					\
		.div = _div,						\
		.peer = &clk_rpmh_##_clk_name##_ao,			\
		.valid_state_mask = (BIT(RPMH_WAKE_ONLY_STATE) |	\
				      BIT(RPMH_ACTIVE_ONLY_STATE) |	\
				      BIT(RPMH_SLEEP_STATE)),		\
		.hw.init = &(struct clk_init_data){			\
			.ops = &clk_rpmh_ops,				\
			.name = #_name,					\
			.parent_data = &(const struct clk_parent_data){ \
					.fw_name = "xo",		\
					.name = "xo_board",		\
			},						\
			.num_parents = 1,				\
		},							\
	};								\
	static struct clk_rpmh clk_rpmh_##_clk_name##_ao = {		\
		.res_name = _res_name,					\
		.res_addr = _res_en_offset,				\
		.res_on_val = _res_on,					\
		.div = _div,						\
		.peer = &clk_rpmh_##_clk_name,				\
		.valid_state_mask = (BIT(RPMH_WAKE_ONLY_STATE) |	\
					BIT(RPMH_ACTIVE_ONLY_STATE)),	\
		.hw.init = &(struct clk_init_data){			\
			.ops = &clk_rpmh_ops,				\
			.name = #_name "_ao",				\
			.parent_data = &(const struct clk_parent_data){ \
					.fw_name = "xo",		\
					.name = "xo_board",		\
			},						\
			.num_parents = 1,				\
		},							\
	}

#define DEFINE_CLK_RPMH_ARC(_name, _res_name, _res_on, _div)		\
	__DEFINE_CLK_RPMH(_name, _name##_##div##_div, _res_name,	\
			  CLK_RPMH_ARC_EN_OFFSET, _res_on, _div)

#define DEFINE_CLK_RPMH_VRM(_name, _suffix, _res_name, _div)		\
	__DEFINE_CLK_RPMH(_name, _name##_suffix, _res_name,		\
			  CLK_RPMH_VRM_EN_OFFSET, 1, _div)
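
/*
 * For illustration only: DEFINE_CLK_RPMH_VRM(ln_bb_clk2, _a2, "lnbclka2", 2)
 * expands (via __DEFINE_CLK_RPMH) into two peered instances,
 * clk_rpmh_ln_bb_clk2_a2 and clk_rpmh_ln_bb_clk2_a2_ao, exposing the clocks
 * "ln_bb_clk2" and "ln_bb_clk2_ao". The _ao ("active only") sibling omits
 * RPMH_SLEEP_STATE from its valid_state_mask, so its request is only
 * honoured while the subsystem is awake (the usual RPMh active-only
 * semantic).
 */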

#define DEFINE_CLK_RPMH_BCM(_name, _res_name)				\
	static struct clk_rpmh clk_rpmh_##_name = {			\
		.res_name = _res_name,					\
		.valid_state_mask = BIT(RPMH_ACTIVE_ONLY_STATE),	\
		.div = 1,						\
		.hw.init = &(struct clk_init_data){			\
			.ops = &clk_rpmh_bcm_ops,			\
			.name = #_name,					\
		},							\
	}

static inline struct clk_rpmh *to_clk_rpmh(struct clk_hw *_hw)
{
	return container_of(_hw, struct clk_rpmh, hw);
}

static inline bool has_state_changed(struct clk_rpmh *c, u32 state)
{
	return (c->last_sent_aggr_state & BIT(state))
		!= (c->aggr_state & BIT(state));
}

static int clk_rpmh_send(struct clk_rpmh *c, enum rpmh_state state,
			 struct tcs_cmd *cmd, bool wait)
{
	if (wait)
		return rpmh_write(c->dev, state, cmd, 1);

	return rpmh_write_async(c->dev, state, cmd, 1);
}

static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c)
{
	struct tcs_cmd cmd = { 0 };
	u32 cmd_state, on_val;
	enum rpmh_state state = RPMH_SLEEP_STATE;
	int ret;
	bool wait;

	cmd.addr = c->res_addr;
	cmd_state = c->aggr_state;
	on_val = c->res_on_val;

	for (; state <= RPMH_ACTIVE_ONLY_STATE; state++) {
		if (has_state_changed(c, state)) {
			if (cmd_state & BIT(state))
				cmd.data = on_val;

			wait = cmd_state && state == RPMH_ACTIVE_ONLY_STATE;
			ret = clk_rpmh_send(c, state, &cmd, wait);
			if (ret) {
				dev_err(c->dev, "set %s state of %s failed: (%d)\n",
					!state ? "sleep" :
					state == RPMH_WAKE_ONLY_STATE ?
					"wake" : "active", c->res_name, ret);
				return ret;
			}
		}
	}

	c->last_sent_aggr_state = c->aggr_state;
	c->peer->last_sent_aggr_state = c->last_sent_aggr_state;

	return 0;
}

/*
 * Update state and aggregate state values based on enable value.
 */
static int clk_rpmh_aggregate_state_send_command(struct clk_rpmh *c,
						bool enable)
{
	int ret;

	c->state = enable ? c->valid_state_mask : 0;
	c->aggr_state = c->state | c->peer->state;
	c->peer->aggr_state = c->aggr_state;

	ret = clk_rpmh_send_aggregate_command(c);
	if (!ret)
		return 0;

	if (ret && enable)
		c->state = 0;
	else if (ret)
		c->state = c->valid_state_mask;

	WARN(1, "clk: %s failed to %s\n", c->res_name,
	     enable ? "enable" : "disable");
	return ret;
}
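
/*
 * Worked example (illustrative, assuming nothing has been sent for this
 * resource yet): enabling the "_ao" sibling while its peer is off sets
 * state = WAKE|ACTIVE, so aggr_state = WAKE|ACTIVE on both peers.
 * clk_rpmh_send_aggregate_command() then sees no change for the sleep set,
 * sends res_on_val to the wake set asynchronously, and sends res_on_val to
 * the active set with wait = true, so the clock is on before clk_prepare()
 * returns.
 */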

static int clk_rpmh_prepare(struct clk_hw *hw)
{
	struct clk_rpmh *c = to_clk_rpmh(hw);
	int ret = 0;

	mutex_lock(&rpmh_clk_lock);
	ret = clk_rpmh_aggregate_state_send_command(c, true);
	mutex_unlock(&rpmh_clk_lock);

	return ret;
}

static void clk_rpmh_unprepare(struct clk_hw *hw)
{
	struct clk_rpmh *c = to_clk_rpmh(hw);

	mutex_lock(&rpmh_clk_lock);
	clk_rpmh_aggregate_state_send_command(c, false);
	mutex_unlock(&rpmh_clk_lock);
}

static unsigned long clk_rpmh_recalc_rate(struct clk_hw *hw,
					unsigned long prate)
{
	struct clk_rpmh *r = to_clk_rpmh(hw);

	/*
	 * RPMh clocks have a fixed rate: the board XO parent rate divided
	 * by the static divider.
	 */
	return prate / r->div;
}
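
/*
 * Example: on most of these SoCs the board XO ("xo_board") runs at 38.4 MHz,
 * so a bi_tcxo clock defined with div = 2 reports 38400000 / 2 = 19200000 Hz
 * (19.2 MHz CXO) from clk_get_rate(). The actual parent rate comes from the
 * board devicetree, so treat these numbers as illustrative.
 */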

static const struct clk_ops clk_rpmh_ops = {
	.prepare	= clk_rpmh_prepare,
	.unprepare	= clk_rpmh_unprepare,
	.recalc_rate	= clk_rpmh_recalc_rate,
};

static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable)
{
	struct tcs_cmd cmd = { 0 };
	u32 cmd_state;
	int ret = 0;

	mutex_lock(&rpmh_clk_lock);
	if (enable) {
		cmd_state = 1;
		if (c->aggr_state)
			cmd_state = c->aggr_state;
	} else {
		cmd_state = 0;
	}

	if (c->last_sent_aggr_state != cmd_state) {
		cmd.addr = c->res_addr;
		cmd.data = BCM_TCS_CMD(1, enable, 0, cmd_state);

		/*
		 * Send only an active-only state request. RPMh continues to
		 * use the active state when we're in sleep/wake state as long
		 * as the sleep/wake state has never been set.
		 */
		ret = clk_rpmh_send(c, RPMH_ACTIVE_ONLY_STATE, &cmd, enable);
		if (ret) {
			dev_err(c->dev, "set active state of %s failed: (%d)\n",
				c->res_name, ret);
		} else {
			c->last_sent_aggr_state = cmd_state;
		}
	}

	mutex_unlock(&rpmh_clk_lock);

	return ret;
}

static int clk_rpmh_bcm_prepare(struct clk_hw *hw)
{
	struct clk_rpmh *c = to_clk_rpmh(hw);

	return clk_rpmh_bcm_send_cmd(c, true);
}

static void clk_rpmh_bcm_unprepare(struct clk_hw *hw)
{
	struct clk_rpmh *c = to_clk_rpmh(hw);

	clk_rpmh_bcm_send_cmd(c, false);
}

static int clk_rpmh_bcm_set_rate(struct clk_hw *hw, unsigned long rate,
				 unsigned long parent_rate)
{
	struct clk_rpmh *c = to_clk_rpmh(hw);

	c->aggr_state = rate / c->unit;
	/*
	 * Since any non-zero value sent to hw would result in enabling the
	 * clock, only send the value if the clock has already been prepared.
	 */
	if (clk_hw_is_prepared(hw))
		clk_rpmh_bcm_send_cmd(c, true);

	return 0;
}
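
/*
 * Note on rounding (illustrative): clk_rpmh_round_rate() below accepts any
 * rate, and the integer division above floors the vote. With a hypothetical
 * unit of 19200000 Hz, a request for 30000000 Hz becomes a vote of 1 BCM
 * unit, so clk_rpmh_bcm_recalc_rate() then reports 19200000 Hz back.
 */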

static long clk_rpmh_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *parent_rate)
{
	return rate;
}

static unsigned long clk_rpmh_bcm_recalc_rate(struct clk_hw *hw,
					unsigned long prate)
{
	struct clk_rpmh *c = to_clk_rpmh(hw);

	return c->aggr_state * c->unit;
}

static const struct clk_ops clk_rpmh_bcm_ops = {
	.prepare	= clk_rpmh_bcm_prepare,
	.unprepare	= clk_rpmh_bcm_unprepare,
	.set_rate	= clk_rpmh_bcm_set_rate,
	.round_rate	= clk_rpmh_round_rate,
	.recalc_rate	= clk_rpmh_bcm_recalc_rate,
};

/* Resource name must match resource id present in cmd-db */
DEFINE_CLK_RPMH_ARC(bi_tcxo, "xo.lvl", 0x3, 1);
DEFINE_CLK_RPMH_ARC(bi_tcxo, "xo.lvl", 0x3, 2);
DEFINE_CLK_RPMH_ARC(bi_tcxo, "xo.lvl", 0x3, 4);
DEFINE_CLK_RPMH_ARC(qlink, "qphy.lvl", 0x1, 4);

DEFINE_CLK_RPMH_VRM(ln_bb_clk1, _a2, "lnbclka1", 2);
DEFINE_CLK_RPMH_VRM(ln_bb_clk2, _a2, "lnbclka2", 2);
DEFINE_CLK_RPMH_VRM(ln_bb_clk3, _a2, "lnbclka3", 2);

DEFINE_CLK_RPMH_VRM(ln_bb_clk1, _a4, "lnbclka1", 4);
DEFINE_CLK_RPMH_VRM(ln_bb_clk2, _a4, "lnbclka2", 4);

DEFINE_CLK_RPMH_VRM(ln_bb_clk2, _g4, "lnbclkg2", 4);
DEFINE_CLK_RPMH_VRM(ln_bb_clk3, _g4, "lnbclkg3", 4);

DEFINE_CLK_RPMH_VRM(rf_clk1, _a, "rfclka1", 1);
DEFINE_CLK_RPMH_VRM(rf_clk2, _a, "rfclka2", 1);
DEFINE_CLK_RPMH_VRM(rf_clk3, _a, "rfclka3", 1);
DEFINE_CLK_RPMH_VRM(rf_clk4, _a, "rfclka4", 1);
DEFINE_CLK_RPMH_VRM(rf_clk5, _a, "rfclka5", 1);

DEFINE_CLK_RPMH_VRM(rf_clk1, _d, "rfclkd1", 1);
DEFINE_CLK_RPMH_VRM(rf_clk2, _d, "rfclkd2", 1);
DEFINE_CLK_RPMH_VRM(rf_clk3, _d, "rfclkd3", 1);
DEFINE_CLK_RPMH_VRM(rf_clk4, _d, "rfclkd4", 1);

DEFINE_CLK_RPMH_VRM(div_clk1, _div2, "divclka1", 2);

DEFINE_CLK_RPMH_BCM(ce, "CE0");
DEFINE_CLK_RPMH_BCM(hwkm, "HK0");
DEFINE_CLK_RPMH_BCM(ipa, "IP0");
DEFINE_CLK_RPMH_BCM(pka, "PKA0");
DEFINE_CLK_RPMH_BCM(qpic_clk, "QP0");

static struct clk_hw *sdm845_rpmh_clocks[] = {
	[RPMH_CXO_CLK]		= &clk_rpmh_bi_tcxo_div2.hw,
	[RPMH_CXO_CLK_A]	= &clk_rpmh_bi_tcxo_div2_ao.hw,
	[RPMH_LN_BB_CLK2]	= &clk_rpmh_ln_bb_clk2_a2.hw,
	[RPMH_LN_BB_CLK2_A]	= &clk_rpmh_ln_bb_clk2_a2_ao.hw,
	[RPMH_LN_BB_CLK3]	= &clk_rpmh_ln_bb_clk3_a2.hw,
	[RPMH_LN_BB_CLK3_A]	= &clk_rpmh_ln_bb_clk3_a2_ao.hw,
	[RPMH_RF_CLK1]		= &clk_rpmh_rf_clk1_a.hw,
	[RPMH_RF_CLK1_A]	= &clk_rpmh_rf_clk1_a_ao.hw,
	[RPMH_RF_CLK2]		= &clk_rpmh_rf_clk2_a.hw,
	[RPMH_RF_CLK2_A]	= &clk_rpmh_rf_clk2_a_ao.hw,
	[RPMH_RF_CLK3]		= &clk_rpmh_rf_clk3_a.hw,
	[RPMH_RF_CLK3_A]	= &clk_rpmh_rf_clk3_a_ao.hw,
	[RPMH_IPA_CLK]		= &clk_rpmh_ipa.hw,
	[RPMH_CE_CLK]		= &clk_rpmh_ce.hw,
};

static const struct clk_rpmh_desc clk_rpmh_sdm845 = {
	.clks = sdm845_rpmh_clocks,
	.num_clks = ARRAY_SIZE(sdm845_rpmh_clocks),
};

static struct clk_hw *sdm670_rpmh_clocks[] = {
	[RPMH_CXO_CLK]		= &clk_rpmh_bi_tcxo_div2.hw,
	[RPMH_CXO_CLK_A]	= &clk_rpmh_bi_tcxo_div2_ao.hw,
	[RPMH_LN_BB_CLK2]	= &clk_rpmh_ln_bb_clk2_a2.hw,
	[RPMH_LN_BB_CLK2_A]	= &clk_rpmh_ln_bb_clk2_a2_ao.hw,
	[RPMH_LN_BB_CLK3]	= &clk_rpmh_ln_bb_clk3_a2.hw,
	[RPMH_LN_BB_CLK3_A]	= &clk_rpmh_ln_bb_clk3_a2_ao.hw,
	[RPMH_RF_CLK1]		= &clk_rpmh_rf_clk1_a.hw,
	[RPMH_RF_CLK1_A]	= &clk_rpmh_rf_clk1_a_ao.hw,
	[RPMH_RF_CLK2]		= &clk_rpmh_rf_clk2_a.hw,
	[RPMH_RF_CLK2_A]	= &clk_rpmh_rf_clk2_a_ao.hw,
	[RPMH_IPA_CLK]		= &clk_rpmh_ipa.hw,
	[RPMH_CE_CLK]		= &clk_rpmh_ce.hw,
};

static const struct clk_rpmh_desc clk_rpmh_sdm670 = {
	.clks = sdm670_rpmh_clocks,
	.num_clks = ARRAY_SIZE(sdm670_rpmh_clocks),
};

static struct clk_hw *sdx55_rpmh_clocks[] = {
	[RPMH_CXO_CLK]		= &clk_rpmh_bi_tcxo_div2.hw,
	[RPMH_CXO_CLK_A]	= &clk_rpmh_bi_tcxo_div2_ao.hw,
	[RPMH_RF_CLK1]		= &clk_rpmh_rf_clk1_d.hw,
	[RPMH_RF_CLK1_A]	= &clk_rpmh_rf_clk1_d_ao.hw,
	[RPMH_RF_CLK2]		= &clk_rpmh_rf_clk2_d.hw,
	[RPMH_RF_CLK2_A]	= &clk_rpmh_rf_clk2_d_ao.hw,
	[RPMH_QPIC_CLK]		= &clk_rpmh_qpic_clk.hw,
	[RPMH_IPA_CLK]		= &clk_rpmh_ipa.hw,
};

static const struct clk_rpmh_desc clk_rpmh_sdx55 = {
	.clks = sdx55_rpmh_clocks,
	.num_clks = ARRAY_SIZE(sdx55_rpmh_clocks),
};

static struct clk_hw *sm8150_rpmh_clocks[] = {
	[RPMH_CXO_CLK]		= &clk_rpmh_bi_tcxo_div2.hw,
	[RPMH_CXO_CLK_A]	= &clk_rpmh_bi_tcxo_div2_ao.hw,
	[RPMH_LN_BB_CLK2]	= &clk_rpmh_ln_bb_clk2_a2.hw,
	[RPMH_LN_BB_CLK2_A]	= &clk_rpmh_ln_bb_clk2_a2_ao.hw,
	[RPMH_LN_BB_CLK3]	= &clk_rpmh_ln_bb_clk3_a2.hw,
	[RPMH_LN_BB_CLK3_A]	= &clk_rpmh_ln_bb_clk3_a2_ao.hw,
	[RPMH_RF_CLK1]		= &clk_rpmh_rf_clk1_a.hw,
	[RPMH_RF_CLK1_A]	= &clk_rpmh_rf_clk1_a_ao.hw,
	[RPMH_RF_CLK2]		= &clk_rpmh_rf_clk2_a.hw,
	[RPMH_RF_CLK2_A]	= &clk_rpmh_rf_clk2_a_ao.hw,
	[RPMH_RF_CLK3]		= &clk_rpmh_rf_clk3_a.hw,
	[RPMH_RF_CLK3_A]	= &clk_rpmh_rf_clk3_a_ao.hw,
};

static const struct clk_rpmh_desc clk_rpmh_sm8150 = {
	.clks = sm8150_rpmh_clocks,
	.num_clks = ARRAY_SIZE(sm8150_rpmh_clocks),
};

static struct clk_hw *sc7180_rpmh_clocks[] = {
	[RPMH_CXO_CLK]		= &clk_rpmh_bi_tcxo_div2.hw,
	[RPMH_CXO_CLK_A]	= &clk_rpmh_bi_tcxo_div2_ao.hw,
	[RPMH_LN_BB_CLK2]	= &clk_rpmh_ln_bb_clk2_a2.hw,
	[RPMH_LN_BB_CLK2_A]	= &clk_rpmh_ln_bb_clk2_a2_ao.hw,
	[RPMH_LN_BB_CLK3]	= &clk_rpmh_ln_bb_clk3_a2.hw,
	[RPMH_LN_BB_CLK3_A]	= &clk_rpmh_ln_bb_clk3_a2_ao.hw,
	[RPMH_RF_CLK1]		= &clk_rpmh_rf_clk1_a.hw,
	[RPMH_RF_CLK1_A]	= &clk_rpmh_rf_clk1_a_ao.hw,
	[RPMH_RF_CLK2]		= &clk_rpmh_rf_clk2_a.hw,
	[RPMH_RF_CLK2_A]	= &clk_rpmh_rf_clk2_a_ao.hw,
	[RPMH_IPA_CLK]		= &clk_rpmh_ipa.hw,
};

static const struct clk_rpmh_desc clk_rpmh_sc7180 = {
	.clks = sc7180_rpmh_clocks,
	.num_clks = ARRAY_SIZE(sc7180_rpmh_clocks),
};

static struct clk_hw *sc8180x_rpmh_clocks[] = {
	[RPMH_CXO_CLK]		= &clk_rpmh_bi_tcxo_div2.hw,
	[RPMH_CXO_CLK_A]	= &clk_rpmh_bi_tcxo_div2_ao.hw,
	[RPMH_LN_BB_CLK2]	= &clk_rpmh_ln_bb_clk2_a2.hw,
	[RPMH_LN_BB_CLK2_A]	= &clk_rpmh_ln_bb_clk2_a2_ao.hw,
	[RPMH_LN_BB_CLK3]	= &clk_rpmh_ln_bb_clk3_a2.hw,
	[RPMH_LN_BB_CLK3_A]	= &clk_rpmh_ln_bb_clk3_a2_ao.hw,
	[RPMH_RF_CLK1]		= &clk_rpmh_rf_clk1_d.hw,
	[RPMH_RF_CLK1_A]	= &clk_rpmh_rf_clk1_d_ao.hw,
	[RPMH_RF_CLK2]		= &clk_rpmh_rf_clk2_d.hw,
	[RPMH_RF_CLK2_A]	= &clk_rpmh_rf_clk2_d_ao.hw,
	[RPMH_RF_CLK3]		= &clk_rpmh_rf_clk3_d.hw,
	[RPMH_RF_CLK3_A]	= &clk_rpmh_rf_clk3_d_ao.hw,
};

static const struct clk_rpmh_desc clk_rpmh_sc8180x = {
	.clks = sc8180x_rpmh_clocks,
	.num_clks = ARRAY_SIZE(sc8180x_rpmh_clocks),
};

static struct clk_hw *sm8250_rpmh_clocks[] = {
	[RPMH_CXO_CLK]		= &clk_rpmh_bi_tcxo_div2.hw,
	[RPMH_CXO_CLK_A]	= &clk_rpmh_bi_tcxo_div2_ao.hw,
	[RPMH_LN_BB_CLK1]	= &clk_rpmh_ln_bb_clk1_a2.hw,
	[RPMH_LN_BB_CLK1_A]	= &clk_rpmh_ln_bb_clk1_a2_ao.hw,
	[RPMH_LN_BB_CLK2]	= &clk_rpmh_ln_bb_clk2_a2.hw,
	[RPMH_LN_BB_CLK2_A]	= &clk_rpmh_ln_bb_clk2_a2_ao.hw,
	[RPMH_LN_BB_CLK3]	= &clk_rpmh_ln_bb_clk3_a2.hw,
	[RPMH_LN_BB_CLK3_A]	= &clk_rpmh_ln_bb_clk3_a2_ao.hw,
	[RPMH_RF_CLK1]		= &clk_rpmh_rf_clk1_a.hw,
	[RPMH_RF_CLK1_A]	= &clk_rpmh_rf_clk1_a_ao.hw,
	[RPMH_RF_CLK3]		= &clk_rpmh_rf_clk3_a.hw,
	[RPMH_RF_CLK3_A]	= &clk_rpmh_rf_clk3_a_ao.hw,
};

static const struct clk_rpmh_desc clk_rpmh_sm8250 = {
	.clks = sm8250_rpmh_clocks,
	.num_clks = ARRAY_SIZE(sm8250_rpmh_clocks),
};

static struct clk_hw *sm8350_rpmh_clocks[] = {
	[RPMH_CXO_CLK]		= &clk_rpmh_bi_tcxo_div2.hw,
	[RPMH_CXO_CLK_A]	= &clk_rpmh_bi_tcxo_div2_ao.hw,
	[RPMH_DIV_CLK1]		= &clk_rpmh_div_clk1_div2.hw,
	[RPMH_DIV_CLK1_A]	= &clk_rpmh_div_clk1_div2_ao.hw,
	[RPMH_LN_BB_CLK1]	= &clk_rpmh_ln_bb_clk1_a2.hw,
	[RPMH_LN_BB_CLK1_A]	= &clk_rpmh_ln_bb_clk1_a2_ao.hw,
	[RPMH_LN_BB_CLK2]	= &clk_rpmh_ln_bb_clk2_a2.hw,
	[RPMH_LN_BB_CLK2_A]	= &clk_rpmh_ln_bb_clk2_a2_ao.hw,
	[RPMH_RF_CLK1]		= &clk_rpmh_rf_clk1_a.hw,
	[RPMH_RF_CLK1_A]	= &clk_rpmh_rf_clk1_a_ao.hw,
	[RPMH_RF_CLK3]		= &clk_rpmh_rf_clk3_a.hw,
	[RPMH_RF_CLK3_A]	= &clk_rpmh_rf_clk3_a_ao.hw,
	[RPMH_RF_CLK4]		= &clk_rpmh_rf_clk4_a.hw,
	[RPMH_RF_CLK4_A]	= &clk_rpmh_rf_clk4_a_ao.hw,
	[RPMH_RF_CLK5]		= &clk_rpmh_rf_clk5_a.hw,
	[RPMH_RF_CLK5_A]	= &clk_rpmh_rf_clk5_a_ao.hw,
	[RPMH_IPA_CLK]		= &clk_rpmh_ipa.hw,
	[RPMH_PKA_CLK]		= &clk_rpmh_pka.hw,
	[RPMH_HWKM_CLK]		= &clk_rpmh_hwkm.hw,
};

static const struct clk_rpmh_desc clk_rpmh_sm8350 = {
	.clks = sm8350_rpmh_clocks,
	.num_clks = ARRAY_SIZE(sm8350_rpmh_clocks),
};

static struct clk_hw *sc8280xp_rpmh_clocks[] = {
	[RPMH_CXO_CLK]		= &clk_rpmh_bi_tcxo_div2.hw,
	[RPMH_CXO_CLK_A]	= &clk_rpmh_bi_tcxo_div2_ao.hw,
	[RPMH_LN_BB_CLK3]       = &clk_rpmh_ln_bb_clk3_a2.hw,
	[RPMH_LN_BB_CLK3_A]     = &clk_rpmh_ln_bb_clk3_a2_ao.hw,
	[RPMH_IPA_CLK]          = &clk_rpmh_ipa.hw,
	[RPMH_PKA_CLK]          = &clk_rpmh_pka.hw,
	[RPMH_HWKM_CLK]         = &clk_rpmh_hwkm.hw,
};

static const struct clk_rpmh_desc clk_rpmh_sc8280xp = {
	.clks = sc8280xp_rpmh_clocks,
	.num_clks = ARRAY_SIZE(sc8280xp_rpmh_clocks),
};

static struct clk_hw *sm8450_rpmh_clocks[] = {
	[RPMH_CXO_CLK]		= &clk_rpmh_bi_tcxo_div4.hw,
	[RPMH_CXO_CLK_A]	= &clk_rpmh_bi_tcxo_div4_ao.hw,
	[RPMH_LN_BB_CLK1]	= &clk_rpmh_ln_bb_clk1_a4.hw,
	[RPMH_LN_BB_CLK1_A]	= &clk_rpmh_ln_bb_clk1_a4_ao.hw,
	[RPMH_LN_BB_CLK2]	= &clk_rpmh_ln_bb_clk2_a4.hw,
	[RPMH_LN_BB_CLK2_A]	= &clk_rpmh_ln_bb_clk2_a4_ao.hw,
	[RPMH_RF_CLK1]		= &clk_rpmh_rf_clk1_a.hw,
	[RPMH_RF_CLK1_A]	= &clk_rpmh_rf_clk1_a_ao.hw,
	[RPMH_RF_CLK2]		= &clk_rpmh_rf_clk2_a.hw,
	[RPMH_RF_CLK2_A]	= &clk_rpmh_rf_clk2_a_ao.hw,
	[RPMH_RF_CLK3]		= &clk_rpmh_rf_clk3_a.hw,
	[RPMH_RF_CLK3_A]	= &clk_rpmh_rf_clk3_a_ao.hw,
	[RPMH_RF_CLK4]		= &clk_rpmh_rf_clk4_a.hw,
	[RPMH_RF_CLK4_A]	= &clk_rpmh_rf_clk4_a_ao.hw,
	[RPMH_IPA_CLK]		= &clk_rpmh_ipa.hw,
};

static const struct clk_rpmh_desc clk_rpmh_sm8450 = {
	.clks = sm8450_rpmh_clocks,
	.num_clks = ARRAY_SIZE(sm8450_rpmh_clocks),
};

static struct clk_hw *sc7280_rpmh_clocks[] = {
	[RPMH_CXO_CLK]      = &clk_rpmh_bi_tcxo_div4.hw,
	[RPMH_CXO_CLK_A]    = &clk_rpmh_bi_tcxo_div4_ao.hw,
	[RPMH_LN_BB_CLK2]   = &clk_rpmh_ln_bb_clk2_a2.hw,
	[RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_a2_ao.hw,
	[RPMH_RF_CLK1]      = &clk_rpmh_rf_clk1_a.hw,
	[RPMH_RF_CLK1_A]    = &clk_rpmh_rf_clk1_a_ao.hw,
	[RPMH_RF_CLK3]      = &clk_rpmh_rf_clk3_a.hw,
	[RPMH_RF_CLK3_A]    = &clk_rpmh_rf_clk3_a_ao.hw,
	[RPMH_RF_CLK4]      = &clk_rpmh_rf_clk4_a.hw,
	[RPMH_RF_CLK4_A]    = &clk_rpmh_rf_clk4_a_ao.hw,
	[RPMH_IPA_CLK]      = &clk_rpmh_ipa.hw,
	[RPMH_PKA_CLK]      = &clk_rpmh_pka.hw,
	[RPMH_HWKM_CLK]     = &clk_rpmh_hwkm.hw,
};

static const struct clk_rpmh_desc clk_rpmh_sc7280 = {
	.clks = sc7280_rpmh_clocks,
	.num_clks = ARRAY_SIZE(sc7280_rpmh_clocks),
};

static struct clk_hw *sm6350_rpmh_clocks[] = {
	[RPMH_CXO_CLK]		= &clk_rpmh_bi_tcxo_div4.hw,
	[RPMH_CXO_CLK_A]	= &clk_rpmh_bi_tcxo_div4_ao.hw,
	[RPMH_LN_BB_CLK2]	= &clk_rpmh_ln_bb_clk2_g4.hw,
	[RPMH_LN_BB_CLK2_A]	= &clk_rpmh_ln_bb_clk2_g4_ao.hw,
	[RPMH_LN_BB_CLK3]	= &clk_rpmh_ln_bb_clk3_g4.hw,
	[RPMH_LN_BB_CLK3_A]	= &clk_rpmh_ln_bb_clk3_g4_ao.hw,
	[RPMH_QLINK_CLK]	= &clk_rpmh_qlink_div4.hw,
	[RPMH_QLINK_CLK_A]	= &clk_rpmh_qlink_div4_ao.hw,
	[RPMH_IPA_CLK]		= &clk_rpmh_ipa.hw,
};

static const struct clk_rpmh_desc clk_rpmh_sm6350 = {
	.clks = sm6350_rpmh_clocks,
	.num_clks = ARRAY_SIZE(sm6350_rpmh_clocks),
};

static struct clk_hw *sdx65_rpmh_clocks[] = {
	[RPMH_CXO_CLK]          = &clk_rpmh_bi_tcxo_div4.hw,
	[RPMH_CXO_CLK_A]        = &clk_rpmh_bi_tcxo_div4_ao.hw,
	[RPMH_LN_BB_CLK1]       = &clk_rpmh_ln_bb_clk1_a4.hw,
	[RPMH_LN_BB_CLK1_A]     = &clk_rpmh_ln_bb_clk1_a4_ao.hw,
	[RPMH_RF_CLK1]          = &clk_rpmh_rf_clk1_a.hw,
	[RPMH_RF_CLK1_A]        = &clk_rpmh_rf_clk1_a_ao.hw,
	[RPMH_RF_CLK2]          = &clk_rpmh_rf_clk2_a.hw,
	[RPMH_RF_CLK2_A]        = &clk_rpmh_rf_clk2_a_ao.hw,
	[RPMH_RF_CLK3]          = &clk_rpmh_rf_clk3_a.hw,
	[RPMH_RF_CLK3_A]        = &clk_rpmh_rf_clk3_a_ao.hw,
	[RPMH_RF_CLK4]          = &clk_rpmh_rf_clk4_a.hw,
	[RPMH_RF_CLK4_A]        = &clk_rpmh_rf_clk4_a_ao.hw,
	[RPMH_IPA_CLK]          = &clk_rpmh_ipa.hw,
	[RPMH_QPIC_CLK]         = &clk_rpmh_qpic_clk.hw,
};

static const struct clk_rpmh_desc clk_rpmh_sdx65 = {
	.clks = sdx65_rpmh_clocks,
	.num_clks = ARRAY_SIZE(sdx65_rpmh_clocks),
};

static struct clk_hw *qdu1000_rpmh_clocks[] = {
	[RPMH_CXO_CLK]      = &clk_rpmh_bi_tcxo_div1.hw,
	[RPMH_CXO_CLK_A]    = &clk_rpmh_bi_tcxo_div1_ao.hw,
};

static const struct clk_rpmh_desc clk_rpmh_qdu1000 = {
	.clks = qdu1000_rpmh_clocks,
	.num_clks = ARRAY_SIZE(qdu1000_rpmh_clocks),
};

static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
					 void *data)
{
	struct clk_rpmh_desc *rpmh = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= rpmh->num_clks) {
		pr_err("%s: invalid index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return rpmh->clks[idx];
}
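
/*
 * Consumer usage sketch (illustrative devicetree snippet; the node and label
 * names are board-specific):
 *
 *	rpmhcc: clock-controller {
 *		compatible = "qcom,sdm845-rpmh-clk";
 *		#clock-cells = <1>;
 *		clock-names = "xo";
 *		clocks = <&xo_board>;
 *	};
 *
 * A peripheral then selects a clock by the indices from
 * <dt-bindings/clock/qcom,rpmh.h>, e.g. clocks = <&rpmhcc RPMH_LN_BB_CLK2>;
 * which of_clk_rpmh_hw_get() translates into the matching clk_hw above.
 */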

static int clk_rpmh_probe(struct platform_device *pdev)
{
	struct clk_hw **hw_clks;
	struct clk_rpmh *rpmh_clk;
	const struct clk_rpmh_desc *desc;
	int ret, i;

	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -ENODEV;

	hw_clks = desc->clks;

	for (i = 0; i < desc->num_clks; i++) {
		const char *name;
		u32 res_addr;
		size_t aux_data_len;
		const struct bcm_db *data;

		if (!hw_clks[i])
			continue;

		name = hw_clks[i]->init->name;

		rpmh_clk = to_clk_rpmh(hw_clks[i]);
		res_addr = cmd_db_read_addr(rpmh_clk->res_name);
		if (!res_addr) {
			dev_err(&pdev->dev, "missing RPMh resource address for %s\n",
				rpmh_clk->res_name);
			return -ENODEV;
		}

		data = cmd_db_read_aux_data(rpmh_clk->res_name, &aux_data_len);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			dev_err(&pdev->dev,
				"error reading RPMh aux data for %s (%d)\n",
				rpmh_clk->res_name, ret);
			return ret;
		}

		/* Convert unit from kHz to Hz */
		if (aux_data_len == sizeof(*data))
			rpmh_clk->unit = le32_to_cpu(data->unit) * 1000ULL;

		rpmh_clk->res_addr += res_addr;
		rpmh_clk->dev = &pdev->dev;

		ret = devm_clk_hw_register(&pdev->dev, hw_clks[i]);
		if (ret) {
			dev_err(&pdev->dev, "failed to register %s\n", name);
			return ret;
		}
	}

	/* typecast to silence compiler warning */
	ret = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_rpmh_hw_get,
					  (void *)desc);
	if (ret) {
		dev_err(&pdev->dev, "Failed to add clock provider\n");
		return ret;
	}

	dev_dbg(&pdev->dev, "Registered RPMh clocks\n");

	return 0;
}
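
/*
 * Addressing sketch (hypothetical address): the macros seed res_addr with the
 * ARC/VRM enable-register offset (0 or 4), and the probe loop above adds the
 * cmd-db address of the resource. So if cmd_db_read_addr("lnbclka2") returned
 * 0x50000, a VRM clock would end up voting at 0x50000 + 4.
 */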

static const struct of_device_id clk_rpmh_match_table[] = {
	{ .compatible = "qcom,qdu1000-rpmh-clk", .data = &clk_rpmh_qdu1000},
	{ .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180},
	{ .compatible = "qcom,sc8180x-rpmh-clk", .data = &clk_rpmh_sc8180x},
	{ .compatible = "qcom,sc8280xp-rpmh-clk", .data = &clk_rpmh_sc8280xp},
	{ .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
	{ .compatible = "qcom,sdm670-rpmh-clk", .data = &clk_rpmh_sdm670},
	{ .compatible = "qcom,sdx55-rpmh-clk",  .data = &clk_rpmh_sdx55},
	{ .compatible = "qcom,sdx65-rpmh-clk",  .data = &clk_rpmh_sdx65},
	{ .compatible = "qcom,sm6350-rpmh-clk", .data = &clk_rpmh_sm6350},
	{ .compatible = "qcom,sm8150-rpmh-clk", .data = &clk_rpmh_sm8150},
	{ .compatible = "qcom,sm8250-rpmh-clk", .data = &clk_rpmh_sm8250},
	{ .compatible = "qcom,sm8350-rpmh-clk", .data = &clk_rpmh_sm8350},
	{ .compatible = "qcom,sm8450-rpmh-clk", .data = &clk_rpmh_sm8450},
	{ .compatible = "qcom,sc7280-rpmh-clk", .data = &clk_rpmh_sc7280},
	{ }
};
MODULE_DEVICE_TABLE(of, clk_rpmh_match_table);

static struct platform_driver clk_rpmh_driver = {
	.probe		= clk_rpmh_probe,
	.driver		= {
		.name	= "clk-rpmh",
		.of_match_table = clk_rpmh_match_table,
	},
};

static int __init clk_rpmh_init(void)
{
	return platform_driver_register(&clk_rpmh_driver);
}
core_initcall(clk_rpmh_init);

static void __exit clk_rpmh_exit(void)
{
	platform_driver_unregister(&clk_rpmh_driver);
}
module_exit(clk_rpmh_exit);

MODULE_DESCRIPTION("QCOM RPMh Clock Driver");
MODULE_LICENSE("GPL v2");