// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>

#include "gdsc.h"

#define PWR_ON_MASK		BIT(31)
#define EN_REST_WAIT_MASK	GENMASK_ULL(23, 20)
#define EN_FEW_WAIT_MASK	GENMASK_ULL(19, 16)
#define CLK_DIS_WAIT_MASK	GENMASK_ULL(15, 12)
#define SW_OVERRIDE_MASK	BIT(2)
#define HW_CONTROL_MASK		BIT(1)
#define SW_COLLAPSE_MASK	BIT(0)
#define GMEM_CLAMP_IO_MASK	BIT(0)
#define GMEM_RESET_MASK		BIT(4)

/* CFG_GDSCR */
#define GDSC_POWER_UP_COMPLETE		BIT(16)
#define GDSC_POWER_DOWN_COMPLETE	BIT(15)
#define GDSC_RETAIN_FF_ENABLE		BIT(11)
#define CFG_GDSCR_OFFSET		0x4

/* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
#define EN_REST_WAIT_VAL	0x2
#define EN_FEW_WAIT_VAL		0x8
#define CLK_DIS_WAIT_VAL	0x2

/* Transition delay shifts */
#define EN_REST_WAIT_SHIFT	20
#define EN_FEW_WAIT_SHIFT	16
#define CLK_DIS_WAIT_SHIFT	12

#define RETAIN_MEM		BIT(14)
#define RETAIN_PERIPH		BIT(13)

#define STATUS_POLL_TIMEOUT_US	1500
#define TIMEOUT_US		500

#define domain_to_gdsc(domain) container_of(domain, struct gdsc, pd)

enum gdsc_status {
	GDSC_OFF,
	GDSC_ON
};

/* Returns 1 if GDSC status is status, 0 if not, and < 0 on error */
static int gdsc_check_status(struct gdsc *sc, enum gdsc_status status)
{
	unsigned int reg;
	u32 val;
	int ret;

	if (sc->flags & POLL_CFG_GDSCR)
		reg = sc->gdscr + CFG_GDSCR_OFFSET;
	else if (sc->gds_hw_ctrl)
		reg = sc->gds_hw_ctrl;
	else
		reg = sc->gdscr;

	ret = regmap_read(sc->regmap, reg, &val);
	if (ret)
		return ret;

	if (sc->flags & POLL_CFG_GDSCR) {
		switch (status) {
		case GDSC_ON:
			return !!(val & GDSC_POWER_UP_COMPLETE);
		case GDSC_OFF:
			return !!(val & GDSC_POWER_DOWN_COMPLETE);
		}
	}

	switch (status) {
	case GDSC_ON:
		return !!(val & PWR_ON_MASK);
	case GDSC_OFF:
		return !(val & PWR_ON_MASK);
	}

	return -EINVAL;
}

static int gdsc_hwctrl(struct gdsc *sc, bool en)
{
	u32 val = en ? HW_CONTROL_MASK : 0;

	return regmap_update_bits(sc->regmap, sc->gdscr, HW_CONTROL_MASK, val);
}

static int gdsc_poll_status(struct gdsc *sc, enum gdsc_status status)
{
	ktime_t start;

	start = ktime_get();
	do {
		if (gdsc_check_status(sc, status))
			return 0;
	} while (ktime_us_delta(ktime_get(), start) < STATUS_POLL_TIMEOUT_US);

	if (gdsc_check_status(sc, status))
		return 0;

	return -ETIMEDOUT;
}

static int gdsc_update_collapse_bit(struct gdsc *sc, bool val)
{
	u32 reg, mask;
	int ret;

	if (sc->collapse_mask) {
		reg = sc->collapse_ctrl;
		mask = sc->collapse_mask;
	} else {
		reg = sc->gdscr;
		mask = SW_COLLAPSE_MASK;
	}

	ret = regmap_update_bits(sc->regmap, reg, mask, val ? mask : 0);
	if (ret)
		return ret;

	return 0;
}
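/*
 * gdsc_toggle_logic() performs the actual state switch: enable the
 * external supply (if any) on the way up, flip the collapse bit, poll
 * until the hardware reports the requested state, and drop the supply
 * again after a successful power-down.
 */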
static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status)
{
	int ret;

	if (status == GDSC_ON && sc->rsupply) {
		ret = regulator_enable(sc->rsupply);
		if (ret < 0)
			return ret;
	}

	ret = gdsc_update_collapse_bit(sc, status == GDSC_OFF);

	/* If disabling votable gdscs, don't poll on status */
	if ((sc->flags & VOTABLE) && status == GDSC_OFF) {
		/*
		 * Add a short delay here to ensure that an enable
		 * right after it was disabled does not put it in an
		 * unknown state
		 */
		udelay(TIMEOUT_US);
		return 0;
	}

	if (sc->gds_hw_ctrl) {
		/*
		 * The gds hw controller asserts/de-asserts the status bit soon
		 * after it receives a power on/off request from a master.
		 * The controller then takes around 8 xo cycles to start its
		 * internal state machine and update the status bit. During
		 * this time, the status bit does not reflect the true status
		 * of the core.
		 * Add a delay of 1 us between writing to the SW_COLLAPSE bit
		 * and polling the status bit.
		 */
		udelay(1);
	}

	ret = gdsc_poll_status(sc, status);
	WARN(ret, "%s status stuck at 'o%s'", sc->pd.name, status ? "ff" : "n");

	if (!ret && status == GDSC_OFF && sc->rsupply) {
		ret = regulator_disable(sc->rsupply);
		if (ret < 0)
			return ret;
	}

	return ret;
}

static inline int gdsc_deassert_reset(struct gdsc *sc)
{
	int i;

	for (i = 0; i < sc->reset_count; i++)
		sc->rcdev->ops->deassert(sc->rcdev, sc->resets[i]);
	return 0;
}

static inline int gdsc_assert_reset(struct gdsc *sc)
{
	int i;

	for (i = 0; i < sc->reset_count; i++)
		sc->rcdev->ops->assert(sc->rcdev, sc->resets[i]);
	return 0;
}

static inline void gdsc_force_mem_on(struct gdsc *sc)
{
	int i;
	u32 mask = RETAIN_MEM;

	if (!(sc->flags & NO_RET_PERIPH))
		mask |= RETAIN_PERIPH;

	for (i = 0; i < sc->cxc_count; i++)
		regmap_update_bits(sc->regmap, sc->cxcs[i], mask, mask);
}

static inline void gdsc_clear_mem_on(struct gdsc *sc)
{
	int i;
	u32 mask = RETAIN_MEM;

	if (!(sc->flags & NO_RET_PERIPH))
		mask |= RETAIN_PERIPH;

	for (i = 0; i < sc->cxc_count; i++)
		regmap_update_bits(sc->regmap, sc->cxcs[i], mask, 0);
}

static inline void gdsc_deassert_clamp_io(struct gdsc *sc)
{
	regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
			   GMEM_CLAMP_IO_MASK, 0);
}

static inline void gdsc_assert_clamp_io(struct gdsc *sc)
{
	regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
			   GMEM_CLAMP_IO_MASK, 1);
}

static inline void gdsc_assert_reset_aon(struct gdsc *sc)
{
	regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
			   GMEM_RESET_MASK, 1);
	udelay(1);
	regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
			   GMEM_RESET_MASK, 0);
}

static void gdsc_retain_ff_on(struct gdsc *sc)
{
	u32 mask = GDSC_RETAIN_FF_ENABLE;

	regmap_update_bits(sc->regmap, sc->gdscr, mask, mask);
}
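/*
 * genpd ->power_on() callback. The sequence below: optional SW/AON
 * reset and IO de-clamp, logic power-up via gdsc_toggle_logic(),
 * memory/peripheral retention, then HW trigger mode and retention
 * flip-flops where the flags ask for them.
 */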
static int gdsc_enable(struct generic_pm_domain *domain)
{
	struct gdsc *sc = domain_to_gdsc(domain);
	int ret;

	if (sc->pwrsts == PWRSTS_ON)
		return gdsc_deassert_reset(sc);

	if (sc->flags & SW_RESET) {
		gdsc_assert_reset(sc);
		udelay(1);
		gdsc_deassert_reset(sc);
	}

	if (sc->flags & CLAMP_IO) {
		if (sc->flags & AON_RESET)
			gdsc_assert_reset_aon(sc);
		gdsc_deassert_clamp_io(sc);
	}

	ret = gdsc_toggle_logic(sc, GDSC_ON);
	if (ret)
		return ret;

	if (sc->pwrsts & PWRSTS_OFF)
		gdsc_force_mem_on(sc);

	/*
	 * If clocks to this power domain were already on, they will take an
	 * additional 4 clock cycles to re-enable after the power domain is
	 * enabled. Delay to account for this. A delay is also needed to ensure
	 * clocks are not enabled within 400ns of enabling power to the
	 * memories.
	 */
	udelay(1);

	/* Turn on HW trigger mode if supported */
	if (sc->flags & HW_CTRL) {
		ret = gdsc_hwctrl(sc, true);
		if (ret)
			return ret;
		/*
		 * Wait for the GDSC to go through a power down and
		 * up cycle. If firmware ends up polling status
		 * bits for the gdsc, it might read an 'on' status before
		 * the GDSC can finish the power cycle.
		 * We wait 1us before returning to ensure the firmware
		 * can't immediately poll the status bits.
		 */
		udelay(1);
	}

	if (sc->flags & RETAIN_FF_ENABLE)
		gdsc_retain_ff_on(sc);

	return 0;
}

static int gdsc_disable(struct generic_pm_domain *domain)
{
	struct gdsc *sc = domain_to_gdsc(domain);
	int ret;

	if (sc->pwrsts == PWRSTS_ON)
		return gdsc_assert_reset(sc);

	/* Turn off HW trigger mode if supported */
	if (sc->flags & HW_CTRL) {
		ret = gdsc_hwctrl(sc, false);
		if (ret < 0)
			return ret;
		/*
		 * Wait for the GDSC to go through a power down and
		 * up cycle. If we poll the status bits for the gdsc
		 * before the power cycle is completed, we might
		 * wrongly read an 'on' status.
		 */
		udelay(1);

		ret = gdsc_poll_status(sc, GDSC_ON);
		if (ret)
			return ret;
	}

	if (sc->pwrsts & PWRSTS_OFF)
		gdsc_clear_mem_on(sc);

	/*
	 * If the GDSC supports only a Retention state, apart from ON,
	 * leave it in ON state.
	 * There is no SW control to transition the GDSC into
	 * Retention state. This happens in HW when the parent
	 * domain goes down to a low power state.
	 */
	if (sc->pwrsts == PWRSTS_RET_ON)
		return 0;

	ret = gdsc_toggle_logic(sc, GDSC_OFF);
	if (ret)
		return ret;

	if (sc->flags & CLAMP_IO)
		gdsc_assert_clamp_io(sc);

	return 0;
}
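/*
 * One-time setup for each GDSC: program the state-machine wait times,
 * sync the kernel's state (regulator refcount, SW vote, retention
 * bits) with whatever state the bootloader left the GDSC in, and
 * finally hand the domain to genpd.
 */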
static int gdsc_init(struct gdsc *sc)
{
	u32 mask, val;
	int on, ret;

	/*
	 * Disable HW trigger: collapse/restore occur based on register writes.
	 * Disable SW override: use the hardware state-machine for sequencing.
	 * Configure wait time between states.
	 */
	mask = HW_CONTROL_MASK | SW_OVERRIDE_MASK |
	       EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK;

	if (!sc->en_rest_wait_val)
		sc->en_rest_wait_val = EN_REST_WAIT_VAL;
	if (!sc->en_few_wait_val)
		sc->en_few_wait_val = EN_FEW_WAIT_VAL;
	if (!sc->clk_dis_wait_val)
		sc->clk_dis_wait_val = CLK_DIS_WAIT_VAL;

	val = sc->en_rest_wait_val << EN_REST_WAIT_SHIFT |
	      sc->en_few_wait_val << EN_FEW_WAIT_SHIFT |
	      sc->clk_dis_wait_val << CLK_DIS_WAIT_SHIFT;

	ret = regmap_update_bits(sc->regmap, sc->gdscr, mask, val);
	if (ret)
		return ret;

	/* Force gdsc ON if only ON state is supported */
	if (sc->pwrsts == PWRSTS_ON) {
		ret = gdsc_toggle_logic(sc, GDSC_ON);
		if (ret)
			return ret;
	}

	on = gdsc_check_status(sc, GDSC_ON);
	if (on < 0)
		return on;

	if (on) {
		/* The regulator must be on, sync the kernel state */
		if (sc->rsupply) {
			ret = regulator_enable(sc->rsupply);
			if (ret < 0)
				return ret;
		}

		/*
		 * Votable GDSCs can be ON due to votes from other masters.
		 * If a votable GDSC is ON, make sure we have a vote.
		 */
		if (sc->flags & VOTABLE) {
			ret = gdsc_update_collapse_bit(sc, false);
			if (ret)
				goto err_disable_supply;
		}

		/* Turn on HW trigger mode if supported */
		if (sc->flags & HW_CTRL) {
			ret = gdsc_hwctrl(sc, true);
			if (ret < 0)
				goto err_disable_supply;
		}

		/*
		 * Make sure the retain bit is set if the GDSC is already on,
		 * otherwise we end up turning off the GDSC and destroying all
		 * the register contents that we thought we were saving.
		 */
		if (sc->flags & RETAIN_FF_ENABLE)
			gdsc_retain_ff_on(sc);
	} else if (sc->flags & ALWAYS_ON) {
		/* If an ALWAYS_ON GDSC is not ON, turn it ON */
		gdsc_enable(&sc->pd);
		on = true;
	}

	if (on || (sc->pwrsts & PWRSTS_RET))
		gdsc_force_mem_on(sc);
	else
		gdsc_clear_mem_on(sc);

	if (sc->flags & ALWAYS_ON)
		sc->pd.flags |= GENPD_FLAG_ALWAYS_ON;
	if (!sc->pd.power_off)
		sc->pd.power_off = gdsc_disable;
	if (!sc->pd.power_on)
		sc->pd.power_on = gdsc_enable;

	ret = pm_genpd_init(&sc->pd, NULL, !on);
	if (ret)
		goto err_disable_supply;

	return 0;

err_disable_supply:
	if (on && sc->rsupply)
		regulator_disable(sc->rsupply);

	return ret;
}
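/*
 * Register all GDSCs described by @desc as a genpd provider for
 * @desc->dev's of_node. A clock-controller driver typically fills in
 * a struct gdsc_desc and calls this from probe; a minimal sketch with
 * hypothetical names:
 *
 *	static struct gdsc *foo_gdscs[] = { &bar_gdsc, &baz_gdsc };
 *	static struct gdsc_desc foo_desc = {
 *		.scs = foo_gdscs,
 *		.num = ARRAY_SIZE(foo_gdscs),
 *	};
 *	...
 *	foo_desc.dev = &pdev->dev;
 *	ret = gdsc_register(&foo_desc, rcdev, regmap);
 */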
int gdsc_register(struct gdsc_desc *desc,
		  struct reset_controller_dev *rcdev, struct regmap *regmap)
{
	int i, ret;
	struct genpd_onecell_data *data;
	struct device *dev = desc->dev;
	struct gdsc **scs = desc->scs;
	size_t num = desc->num;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->domains = devm_kcalloc(dev, num, sizeof(*data->domains),
				     GFP_KERNEL);
	if (!data->domains)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		if (!scs[i] || !scs[i]->supply)
			continue;

		scs[i]->rsupply = devm_regulator_get(dev, scs[i]->supply);
		if (IS_ERR(scs[i]->rsupply))
			return PTR_ERR(scs[i]->rsupply);
	}

	data->num_domains = num;
	for (i = 0; i < num; i++) {
		if (!scs[i])
			continue;
		scs[i]->regmap = regmap;
		scs[i]->rcdev = rcdev;
		ret = gdsc_init(scs[i]);
		if (ret)
			return ret;
		data->domains[i] = &scs[i]->pd;
	}

	/* Add subdomains */
	for (i = 0; i < num; i++) {
		if (!scs[i])
			continue;
		if (scs[i]->parent)
			pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd);
		else if (!IS_ERR_OR_NULL(dev->pm_domain))
			pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
	}

	return of_genpd_add_provider_onecell(dev->of_node, data);
}

void gdsc_unregister(struct gdsc_desc *desc)
{
	int i;
	struct device *dev = desc->dev;
	struct gdsc **scs = desc->scs;
	size_t num = desc->num;

	/* Remove subdomains */
	for (i = 0; i < num; i++) {
		if (!scs[i])
			continue;
		if (scs[i]->parent)
			pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd);
		else if (!IS_ERR_OR_NULL(dev->pm_domain))
			pm_genpd_remove_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
	}
	of_genpd_del_provider(dev->of_node);
}

/*
 * On SDM845+ the GPU GX domain is *almost* entirely controlled by the GMU
 * running in the CX domain, so the CPU doesn't need to know anything about the
 * GX domain EXCEPT....
 *
 * Hardware constraints dictate that the GX be powered down before the CX. If
 * the GMU crashes it could leave the GX on. In order to successfully bring back
 * the device the CPU needs to disable the GX headswitch. There being no sane
 * way to reach in and touch that register from deep inside the GPU driver, we
 * need to set up the infrastructure so that the GPU driver can guarantee that
 * the GX is off in this super special case. We do this by defining a GX gdsc
 * with a dummy enable function and a "default" disable function.
 *
 * This allows us to attach with genpd_dev_pm_attach_by_name() in the GPU
 * driver. During power up, nothing will happen from the CPU (the GMU will
 * power up normally), but during power down this will ensure that the GX
 * domain is *really* off - this gives us a semi-standard way of doing what we
 * need.
 */
int gdsc_gx_do_nothing_enable(struct generic_pm_domain *domain)
{
	/* Do nothing but give genpd the impression that we were successful */
	return 0;
}
EXPORT_SYMBOL_GPL(gdsc_gx_do_nothing_enable);