1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. 4 */ 5 6 #include <linux/clk-provider.h> 7 #include <linux/err.h> 8 #include <linux/kernel.h> 9 #include <linux/module.h> 10 #include <linux/of.h> 11 #include <linux/platform_device.h> 12 #include <soc/qcom/cmd-db.h> 13 #include <soc/qcom/rpmh.h> 14 #include <soc/qcom/tcs.h> 15 16 #include <dt-bindings/clock/qcom,rpmh.h> 17 18 #define CLK_RPMH_ARC_EN_OFFSET 0 19 #define CLK_RPMH_VRM_EN_OFFSET 4 20 21 /** 22 * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager(BCM) 23 * @unit: divisor used to convert Hz value to an RPMh msg 24 * @width: multiplier used to convert Hz value to an RPMh msg 25 * @vcd: virtual clock domain that this bcm belongs to 26 * @reserved: reserved to pad the struct 27 */ 28 struct bcm_db { 29 __le32 unit; 30 __le16 width; 31 u8 vcd; 32 u8 reserved; 33 }; 34 35 /** 36 * struct clk_rpmh - individual rpmh clock data structure 37 * @hw: handle between common and hardware-specific interfaces 38 * @res_name: resource name for the rpmh clock 39 * @div: clock divider to compute the clock rate 40 * @res_addr: base address of the rpmh resource within the RPMh 41 * @res_on_val: rpmh clock enable value 42 * @state: rpmh clock requested state 43 * @aggr_state: rpmh clock aggregated state 44 * @last_sent_aggr_state: rpmh clock last aggr state sent to RPMh 45 * @valid_state_mask: mask to determine the state of the rpmh clock 46 * @unit: divisor to convert rate to rpmh msg in magnitudes of Khz 47 * @dev: device to which it is attached 48 * @peer: pointer to the clock rpmh sibling 49 */ 50 struct clk_rpmh { 51 struct clk_hw hw; 52 const char *res_name; 53 u8 div; 54 u32 res_addr; 55 u32 res_on_val; 56 u32 state; 57 u32 aggr_state; 58 u32 last_sent_aggr_state; 59 u32 valid_state_mask; 60 u32 unit; 61 struct device *dev; 62 struct clk_rpmh *peer; 63 }; 64 65 struct clk_rpmh_desc { 66 struct clk_hw **clks; 67 size_t num_clks; 68 }; 69 70 static DEFINE_MUTEX(rpmh_clk_lock); 71 72 #define __DEFINE_CLK_RPMH(_name, _clk_name, _res_name, \ 73 _res_en_offset, _res_on, _div) \ 74 static struct clk_rpmh clk_rpmh_##_clk_name##_ao; \ 75 static struct clk_rpmh clk_rpmh_##_clk_name = { \ 76 .res_name = _res_name, \ 77 .res_addr = _res_en_offset, \ 78 .res_on_val = _res_on, \ 79 .div = _div, \ 80 .peer = &clk_rpmh_##_clk_name##_ao, \ 81 .valid_state_mask = (BIT(RPMH_WAKE_ONLY_STATE) | \ 82 BIT(RPMH_ACTIVE_ONLY_STATE) | \ 83 BIT(RPMH_SLEEP_STATE)), \ 84 .hw.init = &(struct clk_init_data){ \ 85 .ops = &clk_rpmh_ops, \ 86 .name = #_name, \ 87 .parent_data = &(const struct clk_parent_data){ \ 88 .fw_name = "xo", \ 89 .name = "xo_board", \ 90 }, \ 91 .num_parents = 1, \ 92 }, \ 93 }; \ 94 static struct clk_rpmh clk_rpmh_##_clk_name##_ao= { \ 95 .res_name = _res_name, \ 96 .res_addr = _res_en_offset, \ 97 .res_on_val = _res_on, \ 98 .div = _div, \ 99 .peer = &clk_rpmh_##_clk_name, \ 100 .valid_state_mask = (BIT(RPMH_WAKE_ONLY_STATE) | \ 101 BIT(RPMH_ACTIVE_ONLY_STATE)), \ 102 .hw.init = &(struct clk_init_data){ \ 103 .ops = &clk_rpmh_ops, \ 104 .name = #_name "_ao", \ 105 .parent_data = &(const struct clk_parent_data){ \ 106 .fw_name = "xo", \ 107 .name = "xo_board", \ 108 }, \ 109 .num_parents = 1, \ 110 }, \ 111 } 112 113 #define DEFINE_CLK_RPMH_ARC(_name, _res_name, _res_on, _div) \ 114 __DEFINE_CLK_RPMH(_name, _name##_##div##_div, _res_name, \ 115 CLK_RPMH_ARC_EN_OFFSET, _res_on, _div) 116 117 #define DEFINE_CLK_RPMH_VRM(_name, _suffix, _res_name, _div) \ 118 __DEFINE_CLK_RPMH(_name, _name##_suffix, _res_name, \ 119 CLK_RPMH_VRM_EN_OFFSET, 1, _div) 120 121 #define DEFINE_CLK_RPMH_BCM(_name, _res_name) \ 122 static struct clk_rpmh clk_rpmh_##_name = { \ 123 .res_name = _res_name, \ 124 .valid_state_mask = BIT(RPMH_ACTIVE_ONLY_STATE), \ 125 .div = 1, \ 126 .hw.init = &(struct clk_init_data){ \ 127 .ops = &clk_rpmh_bcm_ops, \ 128 .name = #_name, \ 129 }, \ 130 } 131 132 static inline struct clk_rpmh *to_clk_rpmh(struct clk_hw *_hw) 133 { 134 return container_of(_hw, struct clk_rpmh, hw); 135 } 136 137 static inline bool has_state_changed(struct clk_rpmh *c, u32 state) 138 { 139 return (c->last_sent_aggr_state & BIT(state)) 140 != (c->aggr_state & BIT(state)); 141 } 142 143 static int clk_rpmh_send(struct clk_rpmh *c, enum rpmh_state state, 144 struct tcs_cmd *cmd, bool wait) 145 { 146 if (wait) 147 return rpmh_write(c->dev, state, cmd, 1); 148 149 return rpmh_write_async(c->dev, state, cmd, 1); 150 } 151 152 static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c) 153 { 154 struct tcs_cmd cmd = { 0 }; 155 u32 cmd_state, on_val; 156 enum rpmh_state state = RPMH_SLEEP_STATE; 157 int ret; 158 bool wait; 159 160 cmd.addr = c->res_addr; 161 cmd_state = c->aggr_state; 162 on_val = c->res_on_val; 163 164 for (; state <= RPMH_ACTIVE_ONLY_STATE; state++) { 165 if (has_state_changed(c, state)) { 166 if (cmd_state & BIT(state)) 167 cmd.data = on_val; 168 169 wait = cmd_state && state == RPMH_ACTIVE_ONLY_STATE; 170 ret = clk_rpmh_send(c, state, &cmd, wait); 171 if (ret) { 172 dev_err(c->dev, "set %s state of %s failed: (%d)\n", 173 !state ? "sleep" : 174 state == RPMH_WAKE_ONLY_STATE ? 175 "wake" : "active", c->res_name, ret); 176 return ret; 177 } 178 } 179 } 180 181 c->last_sent_aggr_state = c->aggr_state; 182 c->peer->last_sent_aggr_state = c->last_sent_aggr_state; 183 184 return 0; 185 } 186 187 /* 188 * Update state and aggregate state values based on enable value. 189 */ 190 static int clk_rpmh_aggregate_state_send_command(struct clk_rpmh *c, 191 bool enable) 192 { 193 int ret; 194 195 c->state = enable ? c->valid_state_mask : 0; 196 c->aggr_state = c->state | c->peer->state; 197 c->peer->aggr_state = c->aggr_state; 198 199 ret = clk_rpmh_send_aggregate_command(c); 200 if (!ret) 201 return 0; 202 203 if (ret && enable) 204 c->state = 0; 205 else if (ret) 206 c->state = c->valid_state_mask; 207 208 WARN(1, "clk: %s failed to %s\n", c->res_name, 209 enable ? "enable" : "disable"); 210 return ret; 211 } 212 213 static int clk_rpmh_prepare(struct clk_hw *hw) 214 { 215 struct clk_rpmh *c = to_clk_rpmh(hw); 216 int ret = 0; 217 218 mutex_lock(&rpmh_clk_lock); 219 ret = clk_rpmh_aggregate_state_send_command(c, true); 220 mutex_unlock(&rpmh_clk_lock); 221 222 return ret; 223 } 224 225 static void clk_rpmh_unprepare(struct clk_hw *hw) 226 { 227 struct clk_rpmh *c = to_clk_rpmh(hw); 228 229 mutex_lock(&rpmh_clk_lock); 230 clk_rpmh_aggregate_state_send_command(c, false); 231 mutex_unlock(&rpmh_clk_lock); 232 }; 233 234 static unsigned long clk_rpmh_recalc_rate(struct clk_hw *hw, 235 unsigned long prate) 236 { 237 struct clk_rpmh *r = to_clk_rpmh(hw); 238 239 /* 240 * RPMh clocks have a fixed rate. Return static rate. 241 */ 242 return prate / r->div; 243 } 244 245 static const struct clk_ops clk_rpmh_ops = { 246 .prepare = clk_rpmh_prepare, 247 .unprepare = clk_rpmh_unprepare, 248 .recalc_rate = clk_rpmh_recalc_rate, 249 }; 250 251 static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable) 252 { 253 struct tcs_cmd cmd = { 0 }; 254 u32 cmd_state; 255 int ret = 0; 256 257 mutex_lock(&rpmh_clk_lock); 258 if (enable) { 259 cmd_state = 1; 260 if (c->aggr_state) 261 cmd_state = c->aggr_state; 262 } else { 263 cmd_state = 0; 264 } 265 266 if (c->last_sent_aggr_state != cmd_state) { 267 cmd.addr = c->res_addr; 268 cmd.data = BCM_TCS_CMD(1, enable, 0, cmd_state); 269 270 /* 271 * Send only an active only state request. RPMh continues to 272 * use the active state when we're in sleep/wake state as long 273 * as the sleep/wake state has never been set. 274 */ 275 ret = clk_rpmh_send(c, RPMH_ACTIVE_ONLY_STATE, &cmd, enable); 276 if (ret) { 277 dev_err(c->dev, "set active state of %s failed: (%d)\n", 278 c->res_name, ret); 279 } else { 280 c->last_sent_aggr_state = cmd_state; 281 } 282 } 283 284 mutex_unlock(&rpmh_clk_lock); 285 286 return ret; 287 } 288 289 static int clk_rpmh_bcm_prepare(struct clk_hw *hw) 290 { 291 struct clk_rpmh *c = to_clk_rpmh(hw); 292 293 return clk_rpmh_bcm_send_cmd(c, true); 294 } 295 296 static void clk_rpmh_bcm_unprepare(struct clk_hw *hw) 297 { 298 struct clk_rpmh *c = to_clk_rpmh(hw); 299 300 clk_rpmh_bcm_send_cmd(c, false); 301 } 302 303 static int clk_rpmh_bcm_set_rate(struct clk_hw *hw, unsigned long rate, 304 unsigned long parent_rate) 305 { 306 struct clk_rpmh *c = to_clk_rpmh(hw); 307 308 c->aggr_state = rate / c->unit; 309 /* 310 * Since any non-zero value sent to hw would result in enabling the 311 * clock, only send the value if the clock has already been prepared. 312 */ 313 if (clk_hw_is_prepared(hw)) 314 clk_rpmh_bcm_send_cmd(c, true); 315 316 return 0; 317 } 318 319 static long clk_rpmh_round_rate(struct clk_hw *hw, unsigned long rate, 320 unsigned long *parent_rate) 321 { 322 return rate; 323 } 324 325 static unsigned long clk_rpmh_bcm_recalc_rate(struct clk_hw *hw, 326 unsigned long prate) 327 { 328 struct clk_rpmh *c = to_clk_rpmh(hw); 329 330 return c->aggr_state * c->unit; 331 } 332 333 static const struct clk_ops clk_rpmh_bcm_ops = { 334 .prepare = clk_rpmh_bcm_prepare, 335 .unprepare = clk_rpmh_bcm_unprepare, 336 .set_rate = clk_rpmh_bcm_set_rate, 337 .round_rate = clk_rpmh_round_rate, 338 .recalc_rate = clk_rpmh_bcm_recalc_rate, 339 }; 340 341 /* Resource name must match resource id present in cmd-db */ 342 DEFINE_CLK_RPMH_ARC(bi_tcxo, "xo.lvl", 0x3, 1); 343 DEFINE_CLK_RPMH_ARC(bi_tcxo, "xo.lvl", 0x3, 2); 344 DEFINE_CLK_RPMH_ARC(bi_tcxo, "xo.lvl", 0x3, 4); 345 DEFINE_CLK_RPMH_ARC(qlink, "qphy.lvl", 0x1, 4); 346 347 DEFINE_CLK_RPMH_VRM(ln_bb_clk1, _a2, "lnbclka1", 2); 348 DEFINE_CLK_RPMH_VRM(ln_bb_clk2, _a2, "lnbclka2", 2); 349 DEFINE_CLK_RPMH_VRM(ln_bb_clk3, _a2, "lnbclka3", 2); 350 351 DEFINE_CLK_RPMH_VRM(ln_bb_clk1, _a4, "lnbclka1", 4); 352 DEFINE_CLK_RPMH_VRM(ln_bb_clk2, _a4, "lnbclka2", 4); 353 354 DEFINE_CLK_RPMH_VRM(ln_bb_clk2, _g4, "lnbclkg2", 4); 355 DEFINE_CLK_RPMH_VRM(ln_bb_clk3, _g4, "lnbclkg3", 4); 356 357 DEFINE_CLK_RPMH_VRM(rf_clk1, _a, "rfclka1", 1); 358 DEFINE_CLK_RPMH_VRM(rf_clk2, _a, "rfclka2", 1); 359 DEFINE_CLK_RPMH_VRM(rf_clk3, _a, "rfclka3", 1); 360 DEFINE_CLK_RPMH_VRM(rf_clk4, _a, "rfclka4", 1); 361 DEFINE_CLK_RPMH_VRM(rf_clk5, _a, "rfclka5", 1); 362 363 DEFINE_CLK_RPMH_VRM(rf_clk1, _d, "rfclkd1", 1); 364 DEFINE_CLK_RPMH_VRM(rf_clk2, _d, "rfclkd2", 1); 365 DEFINE_CLK_RPMH_VRM(rf_clk3, _d, "rfclkd3", 1); 366 DEFINE_CLK_RPMH_VRM(rf_clk4, _d, "rfclkd4", 1); 367 368 DEFINE_CLK_RPMH_VRM(clk1, _a1, "clka1", 1); 369 DEFINE_CLK_RPMH_VRM(clk2, _a1, "clka2", 1); 370 DEFINE_CLK_RPMH_VRM(clk3, _a1, "clka3", 1); 371 DEFINE_CLK_RPMH_VRM(clk4, _a1, "clka4", 1); 372 DEFINE_CLK_RPMH_VRM(clk5, _a1, "clka5", 1); 373 374 DEFINE_CLK_RPMH_VRM(clk6, _a2, "clka6", 2); 375 DEFINE_CLK_RPMH_VRM(clk7, _a2, "clka7", 2); 376 DEFINE_CLK_RPMH_VRM(clk8, _a2, "clka8", 2); 377 378 DEFINE_CLK_RPMH_VRM(div_clk1, _div2, "divclka1", 2); 379 380 DEFINE_CLK_RPMH_BCM(ce, "CE0"); 381 DEFINE_CLK_RPMH_BCM(hwkm, "HK0"); 382 DEFINE_CLK_RPMH_BCM(ipa, "IP0"); 383 DEFINE_CLK_RPMH_BCM(pka, "PKA0"); 384 DEFINE_CLK_RPMH_BCM(qpic_clk, "QP0"); 385 386 static struct clk_hw *sdm845_rpmh_clocks[] = { 387 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw, 388 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw, 389 [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_a2.hw, 390 [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_a2_ao.hw, 391 [RPMH_LN_BB_CLK3] = &clk_rpmh_ln_bb_clk3_a2.hw, 392 [RPMH_LN_BB_CLK3_A] = &clk_rpmh_ln_bb_clk3_a2_ao.hw, 393 [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw, 394 [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw, 395 [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_a.hw, 396 [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_a_ao.hw, 397 [RPMH_RF_CLK3] = &clk_rpmh_rf_clk3_a.hw, 398 [RPMH_RF_CLK3_A] = &clk_rpmh_rf_clk3_a_ao.hw, 399 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 400 [RPMH_CE_CLK] = &clk_rpmh_ce.hw, 401 }; 402 403 static const struct clk_rpmh_desc clk_rpmh_sdm845 = { 404 .clks = sdm845_rpmh_clocks, 405 .num_clks = ARRAY_SIZE(sdm845_rpmh_clocks), 406 }; 407 408 static struct clk_hw *sa8775p_rpmh_clocks[] = { 409 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw, 410 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw, 411 [RPMH_LN_BB_CLK1] = &clk_rpmh_ln_bb_clk1_a2.hw, 412 [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_a2.hw, 413 [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_a4_ao.hw, 414 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 415 [RPMH_PKA_CLK] = &clk_rpmh_pka.hw, 416 [RPMH_HWKM_CLK] = &clk_rpmh_hwkm.hw, 417 }; 418 419 static const struct clk_rpmh_desc clk_rpmh_sa8775p = { 420 .clks = sa8775p_rpmh_clocks, 421 .num_clks = ARRAY_SIZE(sa8775p_rpmh_clocks), 422 }; 423 424 static struct clk_hw *sdm670_rpmh_clocks[] = { 425 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw, 426 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw, 427 [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_a2.hw, 428 [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_a2_ao.hw, 429 [RPMH_LN_BB_CLK3] = &clk_rpmh_ln_bb_clk3_a2.hw, 430 [RPMH_LN_BB_CLK3_A] = &clk_rpmh_ln_bb_clk3_a2_ao.hw, 431 [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw, 432 [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw, 433 [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_a.hw, 434 [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_a_ao.hw, 435 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 436 [RPMH_CE_CLK] = &clk_rpmh_ce.hw, 437 }; 438 439 static const struct clk_rpmh_desc clk_rpmh_sdm670 = { 440 .clks = sdm670_rpmh_clocks, 441 .num_clks = ARRAY_SIZE(sdm670_rpmh_clocks), 442 }; 443 444 static struct clk_hw *sdx55_rpmh_clocks[] = { 445 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw, 446 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw, 447 [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_d.hw, 448 [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_d_ao.hw, 449 [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_d.hw, 450 [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_d_ao.hw, 451 [RPMH_QPIC_CLK] = &clk_rpmh_qpic_clk.hw, 452 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 453 }; 454 455 static const struct clk_rpmh_desc clk_rpmh_sdx55 = { 456 .clks = sdx55_rpmh_clocks, 457 .num_clks = ARRAY_SIZE(sdx55_rpmh_clocks), 458 }; 459 460 static struct clk_hw *sm8150_rpmh_clocks[] = { 461 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw, 462 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw, 463 [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_a2.hw, 464 [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_a2_ao.hw, 465 [RPMH_LN_BB_CLK3] = &clk_rpmh_ln_bb_clk3_a2.hw, 466 [RPMH_LN_BB_CLK3_A] = &clk_rpmh_ln_bb_clk3_a2_ao.hw, 467 [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw, 468 [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw, 469 [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_a.hw, 470 [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_a_ao.hw, 471 [RPMH_RF_CLK3] = &clk_rpmh_rf_clk3_a.hw, 472 [RPMH_RF_CLK3_A] = &clk_rpmh_rf_clk3_a_ao.hw, 473 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 474 }; 475 476 static const struct clk_rpmh_desc clk_rpmh_sm8150 = { 477 .clks = sm8150_rpmh_clocks, 478 .num_clks = ARRAY_SIZE(sm8150_rpmh_clocks), 479 }; 480 481 static struct clk_hw *sc7180_rpmh_clocks[] = { 482 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw, 483 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw, 484 [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_a2.hw, 485 [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_a2_ao.hw, 486 [RPMH_LN_BB_CLK3] = &clk_rpmh_ln_bb_clk3_a2.hw, 487 [RPMH_LN_BB_CLK3_A] = &clk_rpmh_ln_bb_clk3_a2_ao.hw, 488 [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw, 489 [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw, 490 [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_a.hw, 491 [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_a_ao.hw, 492 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 493 }; 494 495 static const struct clk_rpmh_desc clk_rpmh_sc7180 = { 496 .clks = sc7180_rpmh_clocks, 497 .num_clks = ARRAY_SIZE(sc7180_rpmh_clocks), 498 }; 499 500 static struct clk_hw *sc8180x_rpmh_clocks[] = { 501 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw, 502 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw, 503 [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_a2.hw, 504 [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_a2_ao.hw, 505 [RPMH_LN_BB_CLK3] = &clk_rpmh_ln_bb_clk3_a2.hw, 506 [RPMH_LN_BB_CLK3_A] = &clk_rpmh_ln_bb_clk3_a2_ao.hw, 507 [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_d.hw, 508 [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_d_ao.hw, 509 [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_d.hw, 510 [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_d_ao.hw, 511 [RPMH_RF_CLK3] = &clk_rpmh_rf_clk3_d.hw, 512 [RPMH_RF_CLK3_A] = &clk_rpmh_rf_clk3_d_ao.hw, 513 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 514 }; 515 516 static const struct clk_rpmh_desc clk_rpmh_sc8180x = { 517 .clks = sc8180x_rpmh_clocks, 518 .num_clks = ARRAY_SIZE(sc8180x_rpmh_clocks), 519 }; 520 521 static struct clk_hw *sm8250_rpmh_clocks[] = { 522 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw, 523 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw, 524 [RPMH_LN_BB_CLK1] = &clk_rpmh_ln_bb_clk1_a2.hw, 525 [RPMH_LN_BB_CLK1_A] = &clk_rpmh_ln_bb_clk1_a2_ao.hw, 526 [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_a2.hw, 527 [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_a2_ao.hw, 528 [RPMH_LN_BB_CLK3] = &clk_rpmh_ln_bb_clk3_a2.hw, 529 [RPMH_LN_BB_CLK3_A] = &clk_rpmh_ln_bb_clk3_a2_ao.hw, 530 [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw, 531 [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw, 532 [RPMH_RF_CLK3] = &clk_rpmh_rf_clk3_a.hw, 533 [RPMH_RF_CLK3_A] = &clk_rpmh_rf_clk3_a_ao.hw, 534 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 535 }; 536 537 static const struct clk_rpmh_desc clk_rpmh_sm8250 = { 538 .clks = sm8250_rpmh_clocks, 539 .num_clks = ARRAY_SIZE(sm8250_rpmh_clocks), 540 }; 541 542 static struct clk_hw *sm8350_rpmh_clocks[] = { 543 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw, 544 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw, 545 [RPMH_DIV_CLK1] = &clk_rpmh_div_clk1_div2.hw, 546 [RPMH_DIV_CLK1_A] = &clk_rpmh_div_clk1_div2_ao.hw, 547 [RPMH_LN_BB_CLK1] = &clk_rpmh_ln_bb_clk1_a2.hw, 548 [RPMH_LN_BB_CLK1_A] = &clk_rpmh_ln_bb_clk1_a2_ao.hw, 549 [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_a2.hw, 550 [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_a2_ao.hw, 551 [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw, 552 [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw, 553 [RPMH_RF_CLK3] = &clk_rpmh_rf_clk3_a.hw, 554 [RPMH_RF_CLK3_A] = &clk_rpmh_rf_clk3_a_ao.hw, 555 [RPMH_RF_CLK4] = &clk_rpmh_rf_clk4_a.hw, 556 [RPMH_RF_CLK4_A] = &clk_rpmh_rf_clk4_a_ao.hw, 557 [RPMH_RF_CLK5] = &clk_rpmh_rf_clk5_a.hw, 558 [RPMH_RF_CLK5_A] = &clk_rpmh_rf_clk5_a_ao.hw, 559 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 560 [RPMH_PKA_CLK] = &clk_rpmh_pka.hw, 561 [RPMH_HWKM_CLK] = &clk_rpmh_hwkm.hw, 562 }; 563 564 static const struct clk_rpmh_desc clk_rpmh_sm8350 = { 565 .clks = sm8350_rpmh_clocks, 566 .num_clks = ARRAY_SIZE(sm8350_rpmh_clocks), 567 }; 568 569 static struct clk_hw *sc8280xp_rpmh_clocks[] = { 570 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw, 571 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw, 572 [RPMH_LN_BB_CLK3] = &clk_rpmh_ln_bb_clk3_a2.hw, 573 [RPMH_LN_BB_CLK3_A] = &clk_rpmh_ln_bb_clk3_a2_ao.hw, 574 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 575 [RPMH_PKA_CLK] = &clk_rpmh_pka.hw, 576 [RPMH_HWKM_CLK] = &clk_rpmh_hwkm.hw, 577 }; 578 579 static const struct clk_rpmh_desc clk_rpmh_sc8280xp = { 580 .clks = sc8280xp_rpmh_clocks, 581 .num_clks = ARRAY_SIZE(sc8280xp_rpmh_clocks), 582 }; 583 584 static struct clk_hw *sm8450_rpmh_clocks[] = { 585 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div4.hw, 586 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div4_ao.hw, 587 [RPMH_LN_BB_CLK1] = &clk_rpmh_ln_bb_clk1_a4.hw, 588 [RPMH_LN_BB_CLK1_A] = &clk_rpmh_ln_bb_clk1_a4_ao.hw, 589 [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_a4.hw, 590 [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_a4_ao.hw, 591 [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw, 592 [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw, 593 [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_a.hw, 594 [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_a_ao.hw, 595 [RPMH_RF_CLK3] = &clk_rpmh_rf_clk3_a.hw, 596 [RPMH_RF_CLK3_A] = &clk_rpmh_rf_clk3_a_ao.hw, 597 [RPMH_RF_CLK4] = &clk_rpmh_rf_clk4_a.hw, 598 [RPMH_RF_CLK4_A] = &clk_rpmh_rf_clk4_a_ao.hw, 599 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 600 }; 601 602 static const struct clk_rpmh_desc clk_rpmh_sm8450 = { 603 .clks = sm8450_rpmh_clocks, 604 .num_clks = ARRAY_SIZE(sm8450_rpmh_clocks), 605 }; 606 607 static struct clk_hw *sm8550_rpmh_clocks[] = { 608 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw, 609 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw, 610 [RPMH_LN_BB_CLK1] = &clk_rpmh_clk6_a2.hw, 611 [RPMH_LN_BB_CLK1_A] = &clk_rpmh_clk6_a2_ao.hw, 612 [RPMH_LN_BB_CLK2] = &clk_rpmh_clk7_a2.hw, 613 [RPMH_LN_BB_CLK2_A] = &clk_rpmh_clk7_a2_ao.hw, 614 [RPMH_LN_BB_CLK3] = &clk_rpmh_clk8_a2.hw, 615 [RPMH_LN_BB_CLK3_A] = &clk_rpmh_clk8_a2_ao.hw, 616 [RPMH_RF_CLK1] = &clk_rpmh_clk1_a1.hw, 617 [RPMH_RF_CLK1_A] = &clk_rpmh_clk1_a1_ao.hw, 618 [RPMH_RF_CLK2] = &clk_rpmh_clk2_a1.hw, 619 [RPMH_RF_CLK2_A] = &clk_rpmh_clk2_a1_ao.hw, 620 [RPMH_RF_CLK3] = &clk_rpmh_clk3_a1.hw, 621 [RPMH_RF_CLK3_A] = &clk_rpmh_clk3_a1_ao.hw, 622 [RPMH_RF_CLK4] = &clk_rpmh_clk4_a1.hw, 623 [RPMH_RF_CLK4_A] = &clk_rpmh_clk4_a1_ao.hw, 624 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 625 }; 626 627 static const struct clk_rpmh_desc clk_rpmh_sm8550 = { 628 .clks = sm8550_rpmh_clocks, 629 .num_clks = ARRAY_SIZE(sm8550_rpmh_clocks), 630 }; 631 632 static struct clk_hw *sc7280_rpmh_clocks[] = { 633 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div4.hw, 634 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div4_ao.hw, 635 [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_a2.hw, 636 [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_a2_ao.hw, 637 [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw, 638 [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw, 639 [RPMH_RF_CLK3] = &clk_rpmh_rf_clk3_a.hw, 640 [RPMH_RF_CLK3_A] = &clk_rpmh_rf_clk3_a_ao.hw, 641 [RPMH_RF_CLK4] = &clk_rpmh_rf_clk4_a.hw, 642 [RPMH_RF_CLK4_A] = &clk_rpmh_rf_clk4_a_ao.hw, 643 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 644 [RPMH_PKA_CLK] = &clk_rpmh_pka.hw, 645 [RPMH_HWKM_CLK] = &clk_rpmh_hwkm.hw, 646 }; 647 648 static const struct clk_rpmh_desc clk_rpmh_sc7280 = { 649 .clks = sc7280_rpmh_clocks, 650 .num_clks = ARRAY_SIZE(sc7280_rpmh_clocks), 651 }; 652 653 static struct clk_hw *sm6350_rpmh_clocks[] = { 654 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div4.hw, 655 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div4_ao.hw, 656 [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_g4.hw, 657 [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_g4_ao.hw, 658 [RPMH_LN_BB_CLK3] = &clk_rpmh_ln_bb_clk3_g4.hw, 659 [RPMH_LN_BB_CLK3_A] = &clk_rpmh_ln_bb_clk3_g4_ao.hw, 660 [RPMH_QLINK_CLK] = &clk_rpmh_qlink_div4.hw, 661 [RPMH_QLINK_CLK_A] = &clk_rpmh_qlink_div4_ao.hw, 662 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 663 }; 664 665 static const struct clk_rpmh_desc clk_rpmh_sm6350 = { 666 .clks = sm6350_rpmh_clocks, 667 .num_clks = ARRAY_SIZE(sm6350_rpmh_clocks), 668 }; 669 670 static struct clk_hw *sdx65_rpmh_clocks[] = { 671 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div4.hw, 672 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div4_ao.hw, 673 [RPMH_LN_BB_CLK1] = &clk_rpmh_ln_bb_clk1_a4.hw, 674 [RPMH_LN_BB_CLK1_A] = &clk_rpmh_ln_bb_clk1_a4_ao.hw, 675 [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw, 676 [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw, 677 [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_a.hw, 678 [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_a_ao.hw, 679 [RPMH_RF_CLK3] = &clk_rpmh_rf_clk3_a.hw, 680 [RPMH_RF_CLK3_A] = &clk_rpmh_rf_clk3_a_ao.hw, 681 [RPMH_RF_CLK4] = &clk_rpmh_rf_clk4_a.hw, 682 [RPMH_RF_CLK4_A] = &clk_rpmh_rf_clk4_a_ao.hw, 683 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 684 [RPMH_QPIC_CLK] = &clk_rpmh_qpic_clk.hw, 685 }; 686 687 static const struct clk_rpmh_desc clk_rpmh_sdx65 = { 688 .clks = sdx65_rpmh_clocks, 689 .num_clks = ARRAY_SIZE(sdx65_rpmh_clocks), 690 }; 691 692 static struct clk_hw *qdu1000_rpmh_clocks[] = { 693 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div1.hw, 694 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div1_ao.hw, 695 }; 696 697 static const struct clk_rpmh_desc clk_rpmh_qdu1000 = { 698 .clks = qdu1000_rpmh_clocks, 699 .num_clks = ARRAY_SIZE(qdu1000_rpmh_clocks), 700 }; 701 702 static struct clk_hw *sdx75_rpmh_clocks[] = { 703 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div4.hw, 704 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div4_ao.hw, 705 [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw, 706 [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw, 707 [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_a.hw, 708 [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_a_ao.hw, 709 [RPMH_RF_CLK3] = &clk_rpmh_rf_clk3_a.hw, 710 [RPMH_RF_CLK3_A] = &clk_rpmh_rf_clk3_a_ao.hw, 711 [RPMH_QPIC_CLK] = &clk_rpmh_qpic_clk.hw, 712 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 713 }; 714 715 static const struct clk_rpmh_desc clk_rpmh_sdx75 = { 716 .clks = sdx75_rpmh_clocks, 717 .num_clks = ARRAY_SIZE(sdx75_rpmh_clocks), 718 }; 719 720 static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec, 721 void *data) 722 { 723 struct clk_rpmh_desc *rpmh = data; 724 unsigned int idx = clkspec->args[0]; 725 726 if (idx >= rpmh->num_clks) { 727 pr_err("%s: invalid index %u\n", __func__, idx); 728 return ERR_PTR(-EINVAL); 729 } 730 731 return rpmh->clks[idx]; 732 } 733 734 static int clk_rpmh_probe(struct platform_device *pdev) 735 { 736 struct clk_hw **hw_clks; 737 struct clk_rpmh *rpmh_clk; 738 const struct clk_rpmh_desc *desc; 739 int ret, i; 740 741 desc = of_device_get_match_data(&pdev->dev); 742 if (!desc) 743 return -ENODEV; 744 745 hw_clks = desc->clks; 746 747 for (i = 0; i < desc->num_clks; i++) { 748 const char *name; 749 u32 res_addr; 750 size_t aux_data_len; 751 const struct bcm_db *data; 752 753 if (!hw_clks[i]) 754 continue; 755 756 name = hw_clks[i]->init->name; 757 758 rpmh_clk = to_clk_rpmh(hw_clks[i]); 759 res_addr = cmd_db_read_addr(rpmh_clk->res_name); 760 if (!res_addr) { 761 dev_err(&pdev->dev, "missing RPMh resource address for %s\n", 762 rpmh_clk->res_name); 763 return -ENODEV; 764 } 765 766 data = cmd_db_read_aux_data(rpmh_clk->res_name, &aux_data_len); 767 if (IS_ERR(data)) { 768 ret = PTR_ERR(data); 769 dev_err(&pdev->dev, 770 "error reading RPMh aux data for %s (%d)\n", 771 rpmh_clk->res_name, ret); 772 return ret; 773 } 774 775 /* Convert unit from Khz to Hz */ 776 if (aux_data_len == sizeof(*data)) 777 rpmh_clk->unit = le32_to_cpu(data->unit) * 1000ULL; 778 779 rpmh_clk->res_addr += res_addr; 780 rpmh_clk->dev = &pdev->dev; 781 782 ret = devm_clk_hw_register(&pdev->dev, hw_clks[i]); 783 if (ret) { 784 dev_err(&pdev->dev, "failed to register %s\n", name); 785 return ret; 786 } 787 } 788 789 /* typecast to silence compiler warning */ 790 ret = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_rpmh_hw_get, 791 (void *)desc); 792 if (ret) { 793 dev_err(&pdev->dev, "Failed to add clock provider\n"); 794 return ret; 795 } 796 797 dev_dbg(&pdev->dev, "Registered RPMh clocks\n"); 798 799 return 0; 800 } 801 802 static const struct of_device_id clk_rpmh_match_table[] = { 803 { .compatible = "qcom,qdu1000-rpmh-clk", .data = &clk_rpmh_qdu1000}, 804 { .compatible = "qcom,sa8775p-rpmh-clk", .data = &clk_rpmh_sa8775p}, 805 { .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180}, 806 { .compatible = "qcom,sc8180x-rpmh-clk", .data = &clk_rpmh_sc8180x}, 807 { .compatible = "qcom,sc8280xp-rpmh-clk", .data = &clk_rpmh_sc8280xp}, 808 { .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845}, 809 { .compatible = "qcom,sdm670-rpmh-clk", .data = &clk_rpmh_sdm670}, 810 { .compatible = "qcom,sdx55-rpmh-clk", .data = &clk_rpmh_sdx55}, 811 { .compatible = "qcom,sdx65-rpmh-clk", .data = &clk_rpmh_sdx65}, 812 { .compatible = "qcom,sdx75-rpmh-clk", .data = &clk_rpmh_sdx75}, 813 { .compatible = "qcom,sm6350-rpmh-clk", .data = &clk_rpmh_sm6350}, 814 { .compatible = "qcom,sm8150-rpmh-clk", .data = &clk_rpmh_sm8150}, 815 { .compatible = "qcom,sm8250-rpmh-clk", .data = &clk_rpmh_sm8250}, 816 { .compatible = "qcom,sm8350-rpmh-clk", .data = &clk_rpmh_sm8350}, 817 { .compatible = "qcom,sm8450-rpmh-clk", .data = &clk_rpmh_sm8450}, 818 { .compatible = "qcom,sm8550-rpmh-clk", .data = &clk_rpmh_sm8550}, 819 { .compatible = "qcom,sc7280-rpmh-clk", .data = &clk_rpmh_sc7280}, 820 { } 821 }; 822 MODULE_DEVICE_TABLE(of, clk_rpmh_match_table); 823 824 static struct platform_driver clk_rpmh_driver = { 825 .probe = clk_rpmh_probe, 826 .driver = { 827 .name = "clk-rpmh", 828 .of_match_table = clk_rpmh_match_table, 829 }, 830 }; 831 832 static int __init clk_rpmh_init(void) 833 { 834 return platform_driver_register(&clk_rpmh_driver); 835 } 836 core_initcall(clk_rpmh_init); 837 838 static void __exit clk_rpmh_exit(void) 839 { 840 platform_driver_unregister(&clk_rpmh_driver); 841 } 842 module_exit(clk_rpmh_exit); 843 844 MODULE_DESCRIPTION("QCOM RPMh Clock Driver"); 845 MODULE_LICENSE("GPL v2"); 846