1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. 4 */ 5 6 #include <linux/clk-provider.h> 7 #include <linux/err.h> 8 #include <linux/kernel.h> 9 #include <linux/module.h> 10 #include <linux/of.h> 11 #include <linux/platform_device.h> 12 #include <soc/qcom/cmd-db.h> 13 #include <soc/qcom/rpmh.h> 14 #include <soc/qcom/tcs.h> 15 16 #include <dt-bindings/clock/qcom,rpmh.h> 17 18 #define CLK_RPMH_ARC_EN_OFFSET 0 19 #define CLK_RPMH_VRM_EN_OFFSET 4 20 21 /** 22 * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager(BCM) 23 * @unit: divisor used to convert Hz value to an RPMh msg 24 * @width: multiplier used to convert Hz value to an RPMh msg 25 * @vcd: virtual clock domain that this bcm belongs to 26 * @reserved: reserved to pad the struct 27 */ 28 struct bcm_db { 29 __le32 unit; 30 __le16 width; 31 u8 vcd; 32 u8 reserved; 33 }; 34 35 /** 36 * struct clk_rpmh - individual rpmh clock data structure 37 * @hw: handle between common and hardware-specific interfaces 38 * @res_name: resource name for the rpmh clock 39 * @div: clock divider to compute the clock rate 40 * @res_addr: base address of the rpmh resource within the RPMh 41 * @res_on_val: rpmh clock enable value 42 * @state: rpmh clock requested state 43 * @aggr_state: rpmh clock aggregated state 44 * @last_sent_aggr_state: rpmh clock last aggr state sent to RPMh 45 * @valid_state_mask: mask to determine the state of the rpmh clock 46 * @unit: divisor to convert rate to rpmh msg in magnitudes of Khz 47 * @dev: device to which it is attached 48 * @peer: pointer to the clock rpmh sibling 49 */ 50 struct clk_rpmh { 51 struct clk_hw hw; 52 const char *res_name; 53 u8 div; 54 u32 res_addr; 55 u32 res_on_val; 56 u32 state; 57 u32 aggr_state; 58 u32 last_sent_aggr_state; 59 u32 valid_state_mask; 60 u32 unit; 61 struct device *dev; 62 struct clk_rpmh *peer; 63 }; 64 65 struct clk_rpmh_desc { 66 struct clk_hw **clks; 67 size_t num_clks; 68 }; 69 70 static DEFINE_MUTEX(rpmh_clk_lock); 71 72 #define __DEFINE_CLK_RPMH(_name, _clk_name, _res_name, \ 73 _res_en_offset, _res_on, _div) \ 74 static struct clk_rpmh clk_rpmh_##_clk_name##_ao; \ 75 static struct clk_rpmh clk_rpmh_##_clk_name = { \ 76 .res_name = _res_name, \ 77 .res_addr = _res_en_offset, \ 78 .res_on_val = _res_on, \ 79 .div = _div, \ 80 .peer = &clk_rpmh_##_clk_name##_ao, \ 81 .valid_state_mask = (BIT(RPMH_WAKE_ONLY_STATE) | \ 82 BIT(RPMH_ACTIVE_ONLY_STATE) | \ 83 BIT(RPMH_SLEEP_STATE)), \ 84 .hw.init = &(struct clk_init_data){ \ 85 .ops = &clk_rpmh_ops, \ 86 .name = #_name, \ 87 .parent_data = &(const struct clk_parent_data){ \ 88 .fw_name = "xo", \ 89 .name = "xo_board", \ 90 }, \ 91 .num_parents = 1, \ 92 }, \ 93 }; \ 94 static struct clk_rpmh clk_rpmh_##_clk_name##_ao= { \ 95 .res_name = _res_name, \ 96 .res_addr = _res_en_offset, \ 97 .res_on_val = _res_on, \ 98 .div = _div, \ 99 .peer = &clk_rpmh_##_clk_name, \ 100 .valid_state_mask = (BIT(RPMH_WAKE_ONLY_STATE) | \ 101 BIT(RPMH_ACTIVE_ONLY_STATE)), \ 102 .hw.init = &(struct clk_init_data){ \ 103 .ops = &clk_rpmh_ops, \ 104 .name = #_name "_ao", \ 105 .parent_data = &(const struct clk_parent_data){ \ 106 .fw_name = "xo", \ 107 .name = "xo_board", \ 108 }, \ 109 .num_parents = 1, \ 110 }, \ 111 } 112 113 #define DEFINE_CLK_RPMH_ARC(_name, _res_name, _res_on, _div) \ 114 __DEFINE_CLK_RPMH(_name, _name##_##div##_div, _res_name, \ 115 CLK_RPMH_ARC_EN_OFFSET, _res_on, _div) 116 117 #define DEFINE_CLK_RPMH_VRM(_name, _suffix, _res_name, _div) \ 118 __DEFINE_CLK_RPMH(_name, _name##_suffix, _res_name, \ 119 CLK_RPMH_VRM_EN_OFFSET, 1, _div) 120 121 #define DEFINE_CLK_RPMH_BCM(_name, _res_name) \ 122 static struct clk_rpmh clk_rpmh_##_name = { \ 123 .res_name = _res_name, \ 124 .valid_state_mask = BIT(RPMH_ACTIVE_ONLY_STATE), \ 125 .div = 1, \ 126 .hw.init = &(struct clk_init_data){ \ 127 .ops = &clk_rpmh_bcm_ops, \ 128 .name = #_name, \ 129 }, \ 130 } 131 132 static inline struct clk_rpmh *to_clk_rpmh(struct clk_hw *_hw) 133 { 134 return container_of(_hw, struct clk_rpmh, hw); 135 } 136 137 static inline bool has_state_changed(struct clk_rpmh *c, u32 state) 138 { 139 return (c->last_sent_aggr_state & BIT(state)) 140 != (c->aggr_state & BIT(state)); 141 } 142 143 static int clk_rpmh_send(struct clk_rpmh *c, enum rpmh_state state, 144 struct tcs_cmd *cmd, bool wait) 145 { 146 if (wait) 147 return rpmh_write(c->dev, state, cmd, 1); 148 149 return rpmh_write_async(c->dev, state, cmd, 1); 150 } 151 152 static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c) 153 { 154 struct tcs_cmd cmd = { 0 }; 155 u32 cmd_state, on_val; 156 enum rpmh_state state = RPMH_SLEEP_STATE; 157 int ret; 158 bool wait; 159 160 cmd.addr = c->res_addr; 161 cmd_state = c->aggr_state; 162 on_val = c->res_on_val; 163 164 for (; state <= RPMH_ACTIVE_ONLY_STATE; state++) { 165 if (has_state_changed(c, state)) { 166 if (cmd_state & BIT(state)) 167 cmd.data = on_val; 168 169 wait = cmd_state && state == RPMH_ACTIVE_ONLY_STATE; 170 ret = clk_rpmh_send(c, state, &cmd, wait); 171 if (ret) { 172 dev_err(c->dev, "set %s state of %s failed: (%d)\n", 173 !state ? "sleep" : 174 state == RPMH_WAKE_ONLY_STATE ? 175 "wake" : "active", c->res_name, ret); 176 return ret; 177 } 178 } 179 } 180 181 c->last_sent_aggr_state = c->aggr_state; 182 c->peer->last_sent_aggr_state = c->last_sent_aggr_state; 183 184 return 0; 185 } 186 187 /* 188 * Update state and aggregate state values based on enable value. 189 */ 190 static int clk_rpmh_aggregate_state_send_command(struct clk_rpmh *c, 191 bool enable) 192 { 193 int ret; 194 195 c->state = enable ? c->valid_state_mask : 0; 196 c->aggr_state = c->state | c->peer->state; 197 c->peer->aggr_state = c->aggr_state; 198 199 ret = clk_rpmh_send_aggregate_command(c); 200 if (!ret) 201 return 0; 202 203 if (ret && enable) 204 c->state = 0; 205 else if (ret) 206 c->state = c->valid_state_mask; 207 208 WARN(1, "clk: %s failed to %s\n", c->res_name, 209 enable ? "enable" : "disable"); 210 return ret; 211 } 212 213 static int clk_rpmh_prepare(struct clk_hw *hw) 214 { 215 struct clk_rpmh *c = to_clk_rpmh(hw); 216 int ret = 0; 217 218 mutex_lock(&rpmh_clk_lock); 219 ret = clk_rpmh_aggregate_state_send_command(c, true); 220 mutex_unlock(&rpmh_clk_lock); 221 222 return ret; 223 } 224 225 static void clk_rpmh_unprepare(struct clk_hw *hw) 226 { 227 struct clk_rpmh *c = to_clk_rpmh(hw); 228 229 mutex_lock(&rpmh_clk_lock); 230 clk_rpmh_aggregate_state_send_command(c, false); 231 mutex_unlock(&rpmh_clk_lock); 232 }; 233 234 static unsigned long clk_rpmh_recalc_rate(struct clk_hw *hw, 235 unsigned long prate) 236 { 237 struct clk_rpmh *r = to_clk_rpmh(hw); 238 239 /* 240 * RPMh clocks have a fixed rate. Return static rate. 241 */ 242 return prate / r->div; 243 } 244 245 static const struct clk_ops clk_rpmh_ops = { 246 .prepare = clk_rpmh_prepare, 247 .unprepare = clk_rpmh_unprepare, 248 .recalc_rate = clk_rpmh_recalc_rate, 249 }; 250 251 static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable) 252 { 253 struct tcs_cmd cmd = { 0 }; 254 u32 cmd_state; 255 int ret = 0; 256 257 mutex_lock(&rpmh_clk_lock); 258 if (enable) { 259 cmd_state = 1; 260 if (c->aggr_state) 261 cmd_state = c->aggr_state; 262 } else { 263 cmd_state = 0; 264 } 265 266 cmd_state = min(cmd_state, BCM_TCS_CMD_VOTE_MASK); 267 268 if (c->last_sent_aggr_state != cmd_state) { 269 cmd.addr = c->res_addr; 270 cmd.data = BCM_TCS_CMD(1, enable, 0, cmd_state); 271 272 /* 273 * Send only an active only state request. RPMh continues to 274 * use the active state when we're in sleep/wake state as long 275 * as the sleep/wake state has never been set. 276 */ 277 ret = clk_rpmh_send(c, RPMH_ACTIVE_ONLY_STATE, &cmd, enable); 278 if (ret) { 279 dev_err(c->dev, "set active state of %s failed: (%d)\n", 280 c->res_name, ret); 281 } else { 282 c->last_sent_aggr_state = cmd_state; 283 } 284 } 285 286 mutex_unlock(&rpmh_clk_lock); 287 288 return ret; 289 } 290 291 static int clk_rpmh_bcm_prepare(struct clk_hw *hw) 292 { 293 struct clk_rpmh *c = to_clk_rpmh(hw); 294 295 return clk_rpmh_bcm_send_cmd(c, true); 296 } 297 298 static void clk_rpmh_bcm_unprepare(struct clk_hw *hw) 299 { 300 struct clk_rpmh *c = to_clk_rpmh(hw); 301 302 clk_rpmh_bcm_send_cmd(c, false); 303 } 304 305 static int clk_rpmh_bcm_set_rate(struct clk_hw *hw, unsigned long rate, 306 unsigned long parent_rate) 307 { 308 struct clk_rpmh *c = to_clk_rpmh(hw); 309 310 c->aggr_state = rate / c->unit; 311 /* 312 * Since any non-zero value sent to hw would result in enabling the 313 * clock, only send the value if the clock has already been prepared. 314 */ 315 if (clk_hw_is_prepared(hw)) 316 clk_rpmh_bcm_send_cmd(c, true); 317 318 return 0; 319 } 320 321 static long clk_rpmh_round_rate(struct clk_hw *hw, unsigned long rate, 322 unsigned long *parent_rate) 323 { 324 return rate; 325 } 326 327 static unsigned long clk_rpmh_bcm_recalc_rate(struct clk_hw *hw, 328 unsigned long prate) 329 { 330 struct clk_rpmh *c = to_clk_rpmh(hw); 331 332 return c->aggr_state * c->unit; 333 } 334 335 static const struct clk_ops clk_rpmh_bcm_ops = { 336 .prepare = clk_rpmh_bcm_prepare, 337 .unprepare = clk_rpmh_bcm_unprepare, 338 .set_rate = clk_rpmh_bcm_set_rate, 339 .round_rate = clk_rpmh_round_rate, 340 .recalc_rate = clk_rpmh_bcm_recalc_rate, 341 }; 342 343 /* Resource name must match resource id present in cmd-db */ 344 DEFINE_CLK_RPMH_ARC(bi_tcxo, "xo.lvl", 0x3, 1); 345 DEFINE_CLK_RPMH_ARC(bi_tcxo, "xo.lvl", 0x3, 2); 346 DEFINE_CLK_RPMH_ARC(bi_tcxo, "xo.lvl", 0x3, 4); 347 DEFINE_CLK_RPMH_ARC(qlink, "qphy.lvl", 0x1, 4); 348 349 DEFINE_CLK_RPMH_VRM(ln_bb_clk1, _a2, "lnbclka1", 2); 350 DEFINE_CLK_RPMH_VRM(ln_bb_clk2, _a2, "lnbclka2", 2); 351 DEFINE_CLK_RPMH_VRM(ln_bb_clk3, _a2, "lnbclka3", 2); 352 353 DEFINE_CLK_RPMH_VRM(ln_bb_clk1, _a4, "lnbclka1", 4); 354 DEFINE_CLK_RPMH_VRM(ln_bb_clk2, _a4, "lnbclka2", 4); 355 356 DEFINE_CLK_RPMH_VRM(ln_bb_clk2, _g4, "lnbclkg2", 4); 357 DEFINE_CLK_RPMH_VRM(ln_bb_clk3, _g4, "lnbclkg3", 4); 358 359 DEFINE_CLK_RPMH_VRM(rf_clk1, _a, "rfclka1", 1); 360 DEFINE_CLK_RPMH_VRM(rf_clk2, _a, "rfclka2", 1); 361 DEFINE_CLK_RPMH_VRM(rf_clk3, _a, "rfclka3", 1); 362 DEFINE_CLK_RPMH_VRM(rf_clk4, _a, "rfclka4", 1); 363 DEFINE_CLK_RPMH_VRM(rf_clk5, _a, "rfclka5", 1); 364 365 DEFINE_CLK_RPMH_VRM(rf_clk1, _d, "rfclkd1", 1); 366 DEFINE_CLK_RPMH_VRM(rf_clk2, _d, "rfclkd2", 1); 367 DEFINE_CLK_RPMH_VRM(rf_clk3, _d, "rfclkd3", 1); 368 DEFINE_CLK_RPMH_VRM(rf_clk4, _d, "rfclkd4", 1); 369 370 DEFINE_CLK_RPMH_VRM(clk1, _a1, "clka1", 1); 371 DEFINE_CLK_RPMH_VRM(clk2, _a1, "clka2", 1); 372 DEFINE_CLK_RPMH_VRM(clk3, _a1, "clka3", 1); 373 DEFINE_CLK_RPMH_VRM(clk4, _a1, "clka4", 1); 374 DEFINE_CLK_RPMH_VRM(clk5, _a1, "clka5", 1); 375 376 DEFINE_CLK_RPMH_VRM(clk6, _a2, "clka6", 2); 377 DEFINE_CLK_RPMH_VRM(clk7, _a2, "clka7", 2); 378 DEFINE_CLK_RPMH_VRM(clk8, _a2, "clka8", 2); 379 380 DEFINE_CLK_RPMH_VRM(div_clk1, _div2, "divclka1", 2); 381 382 DEFINE_CLK_RPMH_BCM(ce, "CE0"); 383 DEFINE_CLK_RPMH_BCM(hwkm, "HK0"); 384 DEFINE_CLK_RPMH_BCM(ipa, "IP0"); 385 DEFINE_CLK_RPMH_BCM(pka, "PKA0"); 386 DEFINE_CLK_RPMH_BCM(qpic_clk, "QP0"); 387 388 static struct clk_hw *sdm845_rpmh_clocks[] = { 389 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw, 390 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw, 391 [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_a2.hw, 392 [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_a2_ao.hw, 393 [RPMH_LN_BB_CLK3] = &clk_rpmh_ln_bb_clk3_a2.hw, 394 [RPMH_LN_BB_CLK3_A] = &clk_rpmh_ln_bb_clk3_a2_ao.hw, 395 [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw, 396 [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw, 397 [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_a.hw, 398 [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_a_ao.hw, 399 [RPMH_RF_CLK3] = &clk_rpmh_rf_clk3_a.hw, 400 [RPMH_RF_CLK3_A] = &clk_rpmh_rf_clk3_a_ao.hw, 401 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 402 [RPMH_CE_CLK] = &clk_rpmh_ce.hw, 403 }; 404 405 static const struct clk_rpmh_desc clk_rpmh_sdm845 = { 406 .clks = sdm845_rpmh_clocks, 407 .num_clks = ARRAY_SIZE(sdm845_rpmh_clocks), 408 }; 409 410 static struct clk_hw *sa8775p_rpmh_clocks[] = { 411 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw, 412 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw, 413 [RPMH_LN_BB_CLK1] = &clk_rpmh_ln_bb_clk1_a2.hw, 414 [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_a2.hw, 415 [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_a4_ao.hw, 416 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 417 [RPMH_PKA_CLK] = &clk_rpmh_pka.hw, 418 [RPMH_HWKM_CLK] = &clk_rpmh_hwkm.hw, 419 }; 420 421 static const struct clk_rpmh_desc clk_rpmh_sa8775p = { 422 .clks = sa8775p_rpmh_clocks, 423 .num_clks = ARRAY_SIZE(sa8775p_rpmh_clocks), 424 }; 425 426 static struct clk_hw *sdm670_rpmh_clocks[] = { 427 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw, 428 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw, 429 [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_a2.hw, 430 [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_a2_ao.hw, 431 [RPMH_LN_BB_CLK3] = &clk_rpmh_ln_bb_clk3_a2.hw, 432 [RPMH_LN_BB_CLK3_A] = &clk_rpmh_ln_bb_clk3_a2_ao.hw, 433 [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw, 434 [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw, 435 [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_a.hw, 436 [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_a_ao.hw, 437 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 438 [RPMH_CE_CLK] = &clk_rpmh_ce.hw, 439 }; 440 441 static const struct clk_rpmh_desc clk_rpmh_sdm670 = { 442 .clks = sdm670_rpmh_clocks, 443 .num_clks = ARRAY_SIZE(sdm670_rpmh_clocks), 444 }; 445 446 static struct clk_hw *sdx55_rpmh_clocks[] = { 447 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw, 448 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw, 449 [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_d.hw, 450 [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_d_ao.hw, 451 [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_d.hw, 452 [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_d_ao.hw, 453 [RPMH_QPIC_CLK] = &clk_rpmh_qpic_clk.hw, 454 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 455 }; 456 457 static const struct clk_rpmh_desc clk_rpmh_sdx55 = { 458 .clks = sdx55_rpmh_clocks, 459 .num_clks = ARRAY_SIZE(sdx55_rpmh_clocks), 460 }; 461 462 static struct clk_hw *sm8150_rpmh_clocks[] = { 463 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw, 464 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw, 465 [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_a2.hw, 466 [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_a2_ao.hw, 467 [RPMH_LN_BB_CLK3] = &clk_rpmh_ln_bb_clk3_a2.hw, 468 [RPMH_LN_BB_CLK3_A] = &clk_rpmh_ln_bb_clk3_a2_ao.hw, 469 [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw, 470 [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw, 471 [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_a.hw, 472 [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_a_ao.hw, 473 [RPMH_RF_CLK3] = &clk_rpmh_rf_clk3_a.hw, 474 [RPMH_RF_CLK3_A] = &clk_rpmh_rf_clk3_a_ao.hw, 475 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 476 }; 477 478 static const struct clk_rpmh_desc clk_rpmh_sm8150 = { 479 .clks = sm8150_rpmh_clocks, 480 .num_clks = ARRAY_SIZE(sm8150_rpmh_clocks), 481 }; 482 483 static struct clk_hw *sc7180_rpmh_clocks[] = { 484 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw, 485 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw, 486 [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_a2.hw, 487 [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_a2_ao.hw, 488 [RPMH_LN_BB_CLK3] = &clk_rpmh_ln_bb_clk3_a2.hw, 489 [RPMH_LN_BB_CLK3_A] = &clk_rpmh_ln_bb_clk3_a2_ao.hw, 490 [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw, 491 [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw, 492 [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_a.hw, 493 [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_a_ao.hw, 494 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 495 }; 496 497 static const struct clk_rpmh_desc clk_rpmh_sc7180 = { 498 .clks = sc7180_rpmh_clocks, 499 .num_clks = ARRAY_SIZE(sc7180_rpmh_clocks), 500 }; 501 502 static struct clk_hw *sc8180x_rpmh_clocks[] = { 503 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw, 504 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw, 505 [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_a2.hw, 506 [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_a2_ao.hw, 507 [RPMH_LN_BB_CLK3] = &clk_rpmh_ln_bb_clk3_a2.hw, 508 [RPMH_LN_BB_CLK3_A] = &clk_rpmh_ln_bb_clk3_a2_ao.hw, 509 [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_d.hw, 510 [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_d_ao.hw, 511 [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_d.hw, 512 [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_d_ao.hw, 513 [RPMH_RF_CLK3] = &clk_rpmh_rf_clk3_d.hw, 514 [RPMH_RF_CLK3_A] = &clk_rpmh_rf_clk3_d_ao.hw, 515 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 516 }; 517 518 static const struct clk_rpmh_desc clk_rpmh_sc8180x = { 519 .clks = sc8180x_rpmh_clocks, 520 .num_clks = ARRAY_SIZE(sc8180x_rpmh_clocks), 521 }; 522 523 static struct clk_hw *sm8250_rpmh_clocks[] = { 524 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw, 525 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw, 526 [RPMH_LN_BB_CLK1] = &clk_rpmh_ln_bb_clk1_a2.hw, 527 [RPMH_LN_BB_CLK1_A] = &clk_rpmh_ln_bb_clk1_a2_ao.hw, 528 [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_a2.hw, 529 [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_a2_ao.hw, 530 [RPMH_LN_BB_CLK3] = &clk_rpmh_ln_bb_clk3_a2.hw, 531 [RPMH_LN_BB_CLK3_A] = &clk_rpmh_ln_bb_clk3_a2_ao.hw, 532 [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw, 533 [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw, 534 [RPMH_RF_CLK3] = &clk_rpmh_rf_clk3_a.hw, 535 [RPMH_RF_CLK3_A] = &clk_rpmh_rf_clk3_a_ao.hw, 536 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 537 }; 538 539 static const struct clk_rpmh_desc clk_rpmh_sm8250 = { 540 .clks = sm8250_rpmh_clocks, 541 .num_clks = ARRAY_SIZE(sm8250_rpmh_clocks), 542 }; 543 544 static struct clk_hw *sm8350_rpmh_clocks[] = { 545 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw, 546 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw, 547 [RPMH_DIV_CLK1] = &clk_rpmh_div_clk1_div2.hw, 548 [RPMH_DIV_CLK1_A] = &clk_rpmh_div_clk1_div2_ao.hw, 549 [RPMH_LN_BB_CLK1] = &clk_rpmh_ln_bb_clk1_a2.hw, 550 [RPMH_LN_BB_CLK1_A] = &clk_rpmh_ln_bb_clk1_a2_ao.hw, 551 [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_a2.hw, 552 [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_a2_ao.hw, 553 [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw, 554 [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw, 555 [RPMH_RF_CLK3] = &clk_rpmh_rf_clk3_a.hw, 556 [RPMH_RF_CLK3_A] = &clk_rpmh_rf_clk3_a_ao.hw, 557 [RPMH_RF_CLK4] = &clk_rpmh_rf_clk4_a.hw, 558 [RPMH_RF_CLK4_A] = &clk_rpmh_rf_clk4_a_ao.hw, 559 [RPMH_RF_CLK5] = &clk_rpmh_rf_clk5_a.hw, 560 [RPMH_RF_CLK5_A] = &clk_rpmh_rf_clk5_a_ao.hw, 561 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 562 [RPMH_PKA_CLK] = &clk_rpmh_pka.hw, 563 [RPMH_HWKM_CLK] = &clk_rpmh_hwkm.hw, 564 }; 565 566 static const struct clk_rpmh_desc clk_rpmh_sm8350 = { 567 .clks = sm8350_rpmh_clocks, 568 .num_clks = ARRAY_SIZE(sm8350_rpmh_clocks), 569 }; 570 571 static struct clk_hw *sc8280xp_rpmh_clocks[] = { 572 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw, 573 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw, 574 [RPMH_LN_BB_CLK3] = &clk_rpmh_ln_bb_clk3_a2.hw, 575 [RPMH_LN_BB_CLK3_A] = &clk_rpmh_ln_bb_clk3_a2_ao.hw, 576 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 577 [RPMH_PKA_CLK] = &clk_rpmh_pka.hw, 578 [RPMH_HWKM_CLK] = &clk_rpmh_hwkm.hw, 579 }; 580 581 static const struct clk_rpmh_desc clk_rpmh_sc8280xp = { 582 .clks = sc8280xp_rpmh_clocks, 583 .num_clks = ARRAY_SIZE(sc8280xp_rpmh_clocks), 584 }; 585 586 static struct clk_hw *sm8450_rpmh_clocks[] = { 587 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div4.hw, 588 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div4_ao.hw, 589 [RPMH_LN_BB_CLK1] = &clk_rpmh_ln_bb_clk1_a4.hw, 590 [RPMH_LN_BB_CLK1_A] = &clk_rpmh_ln_bb_clk1_a4_ao.hw, 591 [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_a4.hw, 592 [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_a4_ao.hw, 593 [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw, 594 [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw, 595 [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_a.hw, 596 [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_a_ao.hw, 597 [RPMH_RF_CLK3] = &clk_rpmh_rf_clk3_a.hw, 598 [RPMH_RF_CLK3_A] = &clk_rpmh_rf_clk3_a_ao.hw, 599 [RPMH_RF_CLK4] = &clk_rpmh_rf_clk4_a.hw, 600 [RPMH_RF_CLK4_A] = &clk_rpmh_rf_clk4_a_ao.hw, 601 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 602 }; 603 604 static const struct clk_rpmh_desc clk_rpmh_sm8450 = { 605 .clks = sm8450_rpmh_clocks, 606 .num_clks = ARRAY_SIZE(sm8450_rpmh_clocks), 607 }; 608 609 static struct clk_hw *sm8550_rpmh_clocks[] = { 610 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw, 611 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw, 612 [RPMH_LN_BB_CLK1] = &clk_rpmh_clk6_a2.hw, 613 [RPMH_LN_BB_CLK1_A] = &clk_rpmh_clk6_a2_ao.hw, 614 [RPMH_LN_BB_CLK2] = &clk_rpmh_clk7_a2.hw, 615 [RPMH_LN_BB_CLK2_A] = &clk_rpmh_clk7_a2_ao.hw, 616 [RPMH_LN_BB_CLK3] = &clk_rpmh_clk8_a2.hw, 617 [RPMH_LN_BB_CLK3_A] = &clk_rpmh_clk8_a2_ao.hw, 618 [RPMH_RF_CLK1] = &clk_rpmh_clk1_a1.hw, 619 [RPMH_RF_CLK1_A] = &clk_rpmh_clk1_a1_ao.hw, 620 [RPMH_RF_CLK2] = &clk_rpmh_clk2_a1.hw, 621 [RPMH_RF_CLK2_A] = &clk_rpmh_clk2_a1_ao.hw, 622 [RPMH_RF_CLK3] = &clk_rpmh_clk3_a1.hw, 623 [RPMH_RF_CLK3_A] = &clk_rpmh_clk3_a1_ao.hw, 624 [RPMH_RF_CLK4] = &clk_rpmh_clk4_a1.hw, 625 [RPMH_RF_CLK4_A] = &clk_rpmh_clk4_a1_ao.hw, 626 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 627 }; 628 629 static const struct clk_rpmh_desc clk_rpmh_sm8550 = { 630 .clks = sm8550_rpmh_clocks, 631 .num_clks = ARRAY_SIZE(sm8550_rpmh_clocks), 632 }; 633 634 static struct clk_hw *sc7280_rpmh_clocks[] = { 635 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div4.hw, 636 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div4_ao.hw, 637 [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_a2.hw, 638 [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_a2_ao.hw, 639 [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw, 640 [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw, 641 [RPMH_RF_CLK3] = &clk_rpmh_rf_clk3_a.hw, 642 [RPMH_RF_CLK3_A] = &clk_rpmh_rf_clk3_a_ao.hw, 643 [RPMH_RF_CLK4] = &clk_rpmh_rf_clk4_a.hw, 644 [RPMH_RF_CLK4_A] = &clk_rpmh_rf_clk4_a_ao.hw, 645 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 646 [RPMH_PKA_CLK] = &clk_rpmh_pka.hw, 647 [RPMH_HWKM_CLK] = &clk_rpmh_hwkm.hw, 648 }; 649 650 static const struct clk_rpmh_desc clk_rpmh_sc7280 = { 651 .clks = sc7280_rpmh_clocks, 652 .num_clks = ARRAY_SIZE(sc7280_rpmh_clocks), 653 }; 654 655 static struct clk_hw *sm6350_rpmh_clocks[] = { 656 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div4.hw, 657 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div4_ao.hw, 658 [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_g4.hw, 659 [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_g4_ao.hw, 660 [RPMH_LN_BB_CLK3] = &clk_rpmh_ln_bb_clk3_g4.hw, 661 [RPMH_LN_BB_CLK3_A] = &clk_rpmh_ln_bb_clk3_g4_ao.hw, 662 [RPMH_QLINK_CLK] = &clk_rpmh_qlink_div4.hw, 663 [RPMH_QLINK_CLK_A] = &clk_rpmh_qlink_div4_ao.hw, 664 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 665 }; 666 667 static const struct clk_rpmh_desc clk_rpmh_sm6350 = { 668 .clks = sm6350_rpmh_clocks, 669 .num_clks = ARRAY_SIZE(sm6350_rpmh_clocks), 670 }; 671 672 static struct clk_hw *sdx65_rpmh_clocks[] = { 673 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div4.hw, 674 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div4_ao.hw, 675 [RPMH_LN_BB_CLK1] = &clk_rpmh_ln_bb_clk1_a4.hw, 676 [RPMH_LN_BB_CLK1_A] = &clk_rpmh_ln_bb_clk1_a4_ao.hw, 677 [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw, 678 [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw, 679 [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_a.hw, 680 [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_a_ao.hw, 681 [RPMH_RF_CLK3] = &clk_rpmh_rf_clk3_a.hw, 682 [RPMH_RF_CLK3_A] = &clk_rpmh_rf_clk3_a_ao.hw, 683 [RPMH_RF_CLK4] = &clk_rpmh_rf_clk4_a.hw, 684 [RPMH_RF_CLK4_A] = &clk_rpmh_rf_clk4_a_ao.hw, 685 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 686 [RPMH_QPIC_CLK] = &clk_rpmh_qpic_clk.hw, 687 }; 688 689 static const struct clk_rpmh_desc clk_rpmh_sdx65 = { 690 .clks = sdx65_rpmh_clocks, 691 .num_clks = ARRAY_SIZE(sdx65_rpmh_clocks), 692 }; 693 694 static struct clk_hw *qdu1000_rpmh_clocks[] = { 695 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div1.hw, 696 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div1_ao.hw, 697 }; 698 699 static const struct clk_rpmh_desc clk_rpmh_qdu1000 = { 700 .clks = qdu1000_rpmh_clocks, 701 .num_clks = ARRAY_SIZE(qdu1000_rpmh_clocks), 702 }; 703 704 static struct clk_hw *sdx75_rpmh_clocks[] = { 705 [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div4.hw, 706 [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div4_ao.hw, 707 [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw, 708 [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw, 709 [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_a.hw, 710 [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_a_ao.hw, 711 [RPMH_RF_CLK3] = &clk_rpmh_rf_clk3_a.hw, 712 [RPMH_RF_CLK3_A] = &clk_rpmh_rf_clk3_a_ao.hw, 713 [RPMH_QPIC_CLK] = &clk_rpmh_qpic_clk.hw, 714 [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw, 715 }; 716 717 static const struct clk_rpmh_desc clk_rpmh_sdx75 = { 718 .clks = sdx75_rpmh_clocks, 719 .num_clks = ARRAY_SIZE(sdx75_rpmh_clocks), 720 }; 721 722 static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec, 723 void *data) 724 { 725 struct clk_rpmh_desc *rpmh = data; 726 unsigned int idx = clkspec->args[0]; 727 728 if (idx >= rpmh->num_clks) { 729 pr_err("%s: invalid index %u\n", __func__, idx); 730 return ERR_PTR(-EINVAL); 731 } 732 733 return rpmh->clks[idx]; 734 } 735 736 static int clk_rpmh_probe(struct platform_device *pdev) 737 { 738 struct clk_hw **hw_clks; 739 struct clk_rpmh *rpmh_clk; 740 const struct clk_rpmh_desc *desc; 741 int ret, i; 742 743 desc = of_device_get_match_data(&pdev->dev); 744 if (!desc) 745 return -ENODEV; 746 747 hw_clks = desc->clks; 748 749 for (i = 0; i < desc->num_clks; i++) { 750 const char *name; 751 u32 res_addr; 752 size_t aux_data_len; 753 const struct bcm_db *data; 754 755 if (!hw_clks[i]) 756 continue; 757 758 name = hw_clks[i]->init->name; 759 760 rpmh_clk = to_clk_rpmh(hw_clks[i]); 761 res_addr = cmd_db_read_addr(rpmh_clk->res_name); 762 if (!res_addr) { 763 dev_err(&pdev->dev, "missing RPMh resource address for %s\n", 764 rpmh_clk->res_name); 765 return -ENODEV; 766 } 767 768 data = cmd_db_read_aux_data(rpmh_clk->res_name, &aux_data_len); 769 if (IS_ERR(data)) { 770 ret = PTR_ERR(data); 771 dev_err(&pdev->dev, 772 "error reading RPMh aux data for %s (%d)\n", 773 rpmh_clk->res_name, ret); 774 return ret; 775 } 776 777 /* Convert unit from Khz to Hz */ 778 if (aux_data_len == sizeof(*data)) 779 rpmh_clk->unit = le32_to_cpu(data->unit) * 1000ULL; 780 781 rpmh_clk->res_addr += res_addr; 782 rpmh_clk->dev = &pdev->dev; 783 784 ret = devm_clk_hw_register(&pdev->dev, hw_clks[i]); 785 if (ret) { 786 dev_err(&pdev->dev, "failed to register %s\n", name); 787 return ret; 788 } 789 } 790 791 /* typecast to silence compiler warning */ 792 ret = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_rpmh_hw_get, 793 (void *)desc); 794 if (ret) { 795 dev_err(&pdev->dev, "Failed to add clock provider\n"); 796 return ret; 797 } 798 799 dev_dbg(&pdev->dev, "Registered RPMh clocks\n"); 800 801 return 0; 802 } 803 804 static const struct of_device_id clk_rpmh_match_table[] = { 805 { .compatible = "qcom,qdu1000-rpmh-clk", .data = &clk_rpmh_qdu1000}, 806 { .compatible = "qcom,sa8775p-rpmh-clk", .data = &clk_rpmh_sa8775p}, 807 { .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180}, 808 { .compatible = "qcom,sc8180x-rpmh-clk", .data = &clk_rpmh_sc8180x}, 809 { .compatible = "qcom,sc8280xp-rpmh-clk", .data = &clk_rpmh_sc8280xp}, 810 { .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845}, 811 { .compatible = "qcom,sdm670-rpmh-clk", .data = &clk_rpmh_sdm670}, 812 { .compatible = "qcom,sdx55-rpmh-clk", .data = &clk_rpmh_sdx55}, 813 { .compatible = "qcom,sdx65-rpmh-clk", .data = &clk_rpmh_sdx65}, 814 { .compatible = "qcom,sdx75-rpmh-clk", .data = &clk_rpmh_sdx75}, 815 { .compatible = "qcom,sm6350-rpmh-clk", .data = &clk_rpmh_sm6350}, 816 { .compatible = "qcom,sm8150-rpmh-clk", .data = &clk_rpmh_sm8150}, 817 { .compatible = "qcom,sm8250-rpmh-clk", .data = &clk_rpmh_sm8250}, 818 { .compatible = "qcom,sm8350-rpmh-clk", .data = &clk_rpmh_sm8350}, 819 { .compatible = "qcom,sm8450-rpmh-clk", .data = &clk_rpmh_sm8450}, 820 { .compatible = "qcom,sm8550-rpmh-clk", .data = &clk_rpmh_sm8550}, 821 { .compatible = "qcom,sc7280-rpmh-clk", .data = &clk_rpmh_sc7280}, 822 { } 823 }; 824 MODULE_DEVICE_TABLE(of, clk_rpmh_match_table); 825 826 static struct platform_driver clk_rpmh_driver = { 827 .probe = clk_rpmh_probe, 828 .driver = { 829 .name = "clk-rpmh", 830 .of_match_table = clk_rpmh_match_table, 831 }, 832 }; 833 834 static int __init clk_rpmh_init(void) 835 { 836 return platform_driver_register(&clk_rpmh_driver); 837 } 838 core_initcall(clk_rpmh_init); 839 840 static void __exit clk_rpmh_exit(void) 841 { 842 platform_driver_unregister(&clk_rpmh_driver); 843 } 844 module_exit(clk_rpmh_exit); 845 846 MODULE_DESCRIPTION("QCOM RPMh Clock Driver"); 847 MODULE_LICENSE("GPL v2"); 848