// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include "gdsc.h"

#define PWR_ON_MASK             BIT(31)
#define EN_REST_WAIT_MASK       GENMASK_ULL(23, 20)
#define EN_FEW_WAIT_MASK        GENMASK_ULL(19, 16)
#define CLK_DIS_WAIT_MASK       GENMASK_ULL(15, 12)
#define SW_OVERRIDE_MASK        BIT(2)
#define HW_CONTROL_MASK         BIT(1)
#define SW_COLLAPSE_MASK        BIT(0)
#define GMEM_CLAMP_IO_MASK      BIT(0)
#define GMEM_RESET_MASK         BIT(4)

/* CFG_GDSCR */
#define GDSC_POWER_UP_COMPLETE          BIT(16)
#define GDSC_POWER_DOWN_COMPLETE        BIT(15)
#define GDSC_RETAIN_FF_ENABLE           BIT(11)
#define CFG_GDSCR_OFFSET                0x4

/*
 * Wait 2^n CXO cycles between state transitions, where n is the field
 * value: n = 2 (4 cycles) for the restore and clock-disable waits, and
 * n = 8 for the enable-few wait.
 */
#define EN_REST_WAIT_VAL        (0x2 << 20)
#define EN_FEW_WAIT_VAL         (0x8 << 16)
#define CLK_DIS_WAIT_VAL        (0x2 << 12)

#define RETAIN_MEM      BIT(14)
#define RETAIN_PERIPH   BIT(13)

#define TIMEOUT_US      500

#define domain_to_gdsc(domain) container_of(domain, struct gdsc, pd)

enum gdsc_status {
        GDSC_OFF,
        GDSC_ON
};

/* Returns 1 if the GDSC is in the requested status, 0 if not, and < 0 on error */
static int gdsc_check_status(struct gdsc *sc, enum gdsc_status status)
{
        unsigned int reg;
        u32 val;
        int ret;

        if (sc->flags & POLL_CFG_GDSCR)
                reg = sc->gdscr + CFG_GDSCR_OFFSET;
        else if (sc->gds_hw_ctrl)
                reg = sc->gds_hw_ctrl;
        else
                reg = sc->gdscr;

        ret = regmap_read(sc->regmap, reg, &val);
        if (ret)
                return ret;

        if (sc->flags & POLL_CFG_GDSCR) {
                switch (status) {
                case GDSC_ON:
                        return !!(val & GDSC_POWER_UP_COMPLETE);
                case GDSC_OFF:
                        return !!(val & GDSC_POWER_DOWN_COMPLETE);
                }
        }

        switch (status) {
        case GDSC_ON:
                return !!(val & PWR_ON_MASK);
        case GDSC_OFF:
                return !(val & PWR_ON_MASK);
        }

        return -EINVAL;
}

static int gdsc_hwctrl(struct gdsc *sc, bool en)
{
        u32 val = en ? HW_CONTROL_MASK : 0;

        return regmap_update_bits(sc->regmap, sc->gdscr, HW_CONTROL_MASK, val);
}

static int gdsc_poll_status(struct gdsc *sc, enum gdsc_status status)
{
        ktime_t start;

        start = ktime_get();
        do {
                if (gdsc_check_status(sc, status))
                        return 0;
        } while (ktime_us_delta(ktime_get(), start) < TIMEOUT_US);

        /* One last check after the timeout window has elapsed */
        if (gdsc_check_status(sc, status))
                return 0;

        return -ETIMEDOUT;
}
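/*
 * For orientation (derived purely from the masks above, not an exhaustive
 * register description), the GDSCR bits this file touches are:
 *
 *      bit 31          PWR_ON          (status, read by gdsc_check_status())
 *      bits 23:20      EN_REST_WAIT    (wait-state field)
 *      bits 19:16      EN_FEW_WAIT     (wait-state field)
 *      bits 15:12      CLK_DIS_WAIT    (wait-state field)
 *      bit 2           SW_OVERRIDE
 *      bit 1           HW_CONTROL      (HW trigger mode, see gdsc_hwctrl())
 *      bit 0           SW_COLLAPSE     (written by gdsc_toggle_logic())
 */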
static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status)
{
        int ret;
        u32 val = (status == GDSC_ON) ? 0 : SW_COLLAPSE_MASK;

        if (status == GDSC_ON && sc->rsupply) {
                ret = regulator_enable(sc->rsupply);
                if (ret < 0)
                        return ret;
        }

        ret = regmap_update_bits(sc->regmap, sc->gdscr, SW_COLLAPSE_MASK, val);
        if (ret)
                return ret;

        /* If disabling votable gdscs, don't poll on status */
        if ((sc->flags & VOTABLE) && status == GDSC_OFF) {
                /*
                 * Add a short delay here to ensure that an enable
                 * right after it was disabled does not put it in an
                 * unknown state.
                 */
                udelay(TIMEOUT_US);
                return 0;
        }

        if (sc->gds_hw_ctrl) {
                /*
                 * The gds hw controller asserts/de-asserts the status bit
                 * soon after it receives a power on/off request from a
                 * master. The controller then takes around 8 xo cycles to
                 * start its internal state machine and update the status
                 * bit. During this time, the status bit does not reflect
                 * the true status of the core.
                 * Add a delay of 1 us between writing to the SW_COLLAPSE
                 * bit and polling the status bit.
                 */
                udelay(1);
        }

        ret = gdsc_poll_status(sc, status);
        WARN(ret, "%s status stuck at 'o%s'", sc->pd.name, status ? "ff" : "n");

        if (!ret && status == GDSC_OFF && sc->rsupply) {
                ret = regulator_disable(sc->rsupply);
                if (ret < 0)
                        return ret;
        }

        return ret;
}

static inline int gdsc_deassert_reset(struct gdsc *sc)
{
        int i;

        for (i = 0; i < sc->reset_count; i++)
                sc->rcdev->ops->deassert(sc->rcdev, sc->resets[i]);
        return 0;
}

static inline int gdsc_assert_reset(struct gdsc *sc)
{
        int i;

        for (i = 0; i < sc->reset_count; i++)
                sc->rcdev->ops->assert(sc->rcdev, sc->resets[i]);
        return 0;
}

static inline void gdsc_force_mem_on(struct gdsc *sc)
{
        int i;
        u32 mask = RETAIN_MEM;

        if (!(sc->flags & NO_RET_PERIPH))
                mask |= RETAIN_PERIPH;

        for (i = 0; i < sc->cxc_count; i++)
                regmap_update_bits(sc->regmap, sc->cxcs[i], mask, mask);
}

static inline void gdsc_clear_mem_on(struct gdsc *sc)
{
        int i;
        u32 mask = RETAIN_MEM;

        if (!(sc->flags & NO_RET_PERIPH))
                mask |= RETAIN_PERIPH;

        for (i = 0; i < sc->cxc_count; i++)
                regmap_update_bits(sc->regmap, sc->cxcs[i], mask, 0);
}

static inline void gdsc_deassert_clamp_io(struct gdsc *sc)
{
        regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
                           GMEM_CLAMP_IO_MASK, 0);
}

static inline void gdsc_assert_clamp_io(struct gdsc *sc)
{
        regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
                           GMEM_CLAMP_IO_MASK, GMEM_CLAMP_IO_MASK);
}

static inline void gdsc_assert_reset_aon(struct gdsc *sc)
{
        /*
         * regmap_update_bits() ANDs the value with the mask, so the value
         * must be GMEM_RESET_MASK itself (not 1) for BIT(4) to be set.
         */
        regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
                           GMEM_RESET_MASK, GMEM_RESET_MASK);
        udelay(1);
        regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
                           GMEM_RESET_MASK, 0);
}

static void gdsc_retain_ff_on(struct gdsc *sc)
{
        u32 mask = GDSC_RETAIN_FF_ENABLE;

        regmap_update_bits(sc->regmap, sc->gdscr, mask, mask);
}
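/*
 * Illustration only: the ->cxcs/->cxc_count fields walked by
 * gdsc_force_mem_on()/gdsc_clear_mem_on() above are filled in by the clock
 * controller drivers. A hypothetical domain with two memory banks (register
 * offsets invented for the example) would look roughly like:
 *
 *      static struct gdsc example_gdsc = {
 *              .gdscr = 0x7d024,
 *              .cxcs = (unsigned int []){ 0x7d01c, 0x7d038 },
 *              .cxc_count = 2,
 *              .pd = {
 *                      .name = "example_gdsc",
 *              },
 *              .pwrsts = PWRSTS_OFF_ON,
 *      };
 */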
static int gdsc_enable(struct generic_pm_domain *domain)
{
        struct gdsc *sc = domain_to_gdsc(domain);
        int ret;

        if (sc->pwrsts == PWRSTS_ON)
                return gdsc_deassert_reset(sc);

        if (sc->flags & SW_RESET) {
                gdsc_assert_reset(sc);
                udelay(1);
                gdsc_deassert_reset(sc);
        }

        if (sc->flags & CLAMP_IO) {
                if (sc->flags & AON_RESET)
                        gdsc_assert_reset_aon(sc);
                gdsc_deassert_clamp_io(sc);
        }

        ret = gdsc_toggle_logic(sc, GDSC_ON);
        if (ret)
                return ret;

        if (sc->pwrsts & PWRSTS_OFF)
                gdsc_force_mem_on(sc);

        /*
         * If clocks to this power domain were already on, they will take an
         * additional 4 clock cycles to re-enable after the power domain is
         * enabled. Delay to account for this. A delay is also needed to
         * ensure clocks are not enabled within 400ns of enabling power to
         * the memories.
         */
        udelay(1);

        /* Turn on HW trigger mode if supported */
        if (sc->flags & HW_CTRL) {
                ret = gdsc_hwctrl(sc, true);
                if (ret)
                        return ret;
                /*
                 * Wait for the GDSC to go through a power down and up
                 * cycle. If firmware ends up polling the status bits for
                 * the gdsc, it might read an 'on' status before the GDSC
                 * can finish the power cycle.
                 * Wait 1 us before returning to ensure the firmware can't
                 * immediately poll the status bits.
                 */
                udelay(1);
        }

        if (sc->flags & RETAIN_FF_ENABLE)
                gdsc_retain_ff_on(sc);

        return 0;
}

static int gdsc_disable(struct generic_pm_domain *domain)
{
        struct gdsc *sc = domain_to_gdsc(domain);
        int ret;

        if (sc->pwrsts == PWRSTS_ON)
                return gdsc_assert_reset(sc);

        /* Turn off HW trigger mode if supported */
        if (sc->flags & HW_CTRL) {
                ret = gdsc_hwctrl(sc, false);
                if (ret < 0)
                        return ret;
                /*
                 * Wait for the GDSC to go through a power down and up
                 * cycle. If we poll the status bits for the gdsc before
                 * the power cycle is complete, we might wrongly read an
                 * 'on' status.
                 */
                udelay(1);

                ret = gdsc_poll_status(sc, GDSC_ON);
                if (ret)
                        return ret;
        }

        if (sc->pwrsts & PWRSTS_OFF)
                gdsc_clear_mem_on(sc);

        ret = gdsc_toggle_logic(sc, GDSC_OFF);
        if (ret)
                return ret;

        if (sc->flags & CLAMP_IO)
                gdsc_assert_clamp_io(sc);

        return 0;
}
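/*
 * Worked example (illustrative): the first masked write in gdsc_init()
 * below programs all three wait-state fields at once. With the default
 * values the composite value is:
 *
 *      val = EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL
 *          = (0x2 << 20)      | (0x8 << 16)     | (0x2 << 12)
 *          =  0x00200000      |  0x00080000     |  0x00002000
 *          =  0x00282000
 *
 * HW_CONTROL_MASK and SW_OVERRIDE_MASK are part of the mask but not of the
 * value, so the same write also clears both of those bits.
 */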
static int gdsc_init(struct gdsc *sc)
{
        u32 mask, val;
        int on, ret;

        /*
         * Disable HW trigger: collapse/restore occur based on register
         * writes.
         * Disable SW override: use the hardware state-machine for
         * sequencing.
         * Configure wait time between states.
         */
        mask = HW_CONTROL_MASK | SW_OVERRIDE_MASK |
               EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK;
        val = EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
        ret = regmap_update_bits(sc->regmap, sc->gdscr, mask, val);
        if (ret)
                return ret;

        /* Force gdsc ON if only ON state is supported */
        if (sc->pwrsts == PWRSTS_ON) {
                ret = gdsc_toggle_logic(sc, GDSC_ON);
                if (ret)
                        return ret;
        }

        on = gdsc_check_status(sc, GDSC_ON);
        if (on < 0)
                return on;

        if (on) {
                /* The regulator must be on, sync the kernel state */
                if (sc->rsupply) {
                        ret = regulator_enable(sc->rsupply);
                        if (ret < 0)
                                return ret;
                }

                /*
                 * Votable GDSCs can be ON due to a vote from another master.
                 * If a votable GDSC is ON, make sure we have a vote: the
                 * SW_COLLAPSE bit of val is clear, so this write clears
                 * SW_COLLAPSE and thereby casts our vote.
                 */
                if (sc->flags & VOTABLE) {
                        ret = regmap_update_bits(sc->regmap, sc->gdscr,
                                                 SW_COLLAPSE_MASK, val);
                        if (ret)
                                return ret;
                }

                /* Turn on HW trigger mode if supported */
                if (sc->flags & HW_CTRL) {
                        ret = gdsc_hwctrl(sc, true);
                        if (ret < 0)
                                return ret;
                }

                /*
                 * Make sure the retain bit is set if the GDSC is already on,
                 * otherwise we end up turning off the GDSC and destroying all
                 * the register contents that we thought we were saving.
                 */
                if (sc->flags & RETAIN_FF_ENABLE)
                        gdsc_retain_ff_on(sc);
        } else if (sc->flags & ALWAYS_ON) {
                /* If ALWAYS_ON GDSCs are not ON, turn them ON */
                gdsc_enable(&sc->pd);
                on = true;
        }

        if (on || (sc->pwrsts & PWRSTS_RET))
                gdsc_force_mem_on(sc);
        else
                gdsc_clear_mem_on(sc);

        if (sc->flags & ALWAYS_ON)
                sc->pd.flags |= GENPD_FLAG_ALWAYS_ON;
        if (!sc->pd.power_off)
                sc->pd.power_off = gdsc_disable;
        if (!sc->pd.power_on)
                sc->pd.power_on = gdsc_enable;
        pm_genpd_init(&sc->pd, NULL, !on);

        return 0;
}

int gdsc_register(struct gdsc_desc *desc,
                  struct reset_controller_dev *rcdev, struct regmap *regmap)
{
        int i, ret;
        struct genpd_onecell_data *data;
        struct device *dev = desc->dev;
        struct gdsc **scs = desc->scs;
        size_t num = desc->num;

        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        data->domains = devm_kcalloc(dev, num, sizeof(*data->domains),
                                     GFP_KERNEL);
        if (!data->domains)
                return -ENOMEM;

        for (i = 0; i < num; i++) {
                if (!scs[i] || !scs[i]->supply)
                        continue;

                scs[i]->rsupply = devm_regulator_get(dev, scs[i]->supply);
                if (IS_ERR(scs[i]->rsupply))
                        return PTR_ERR(scs[i]->rsupply);
        }

        data->num_domains = num;
        for (i = 0; i < num; i++) {
                if (!scs[i])
                        continue;
                scs[i]->regmap = regmap;
                scs[i]->rcdev = rcdev;
                ret = gdsc_init(scs[i]);
                if (ret)
                        return ret;
                data->domains[i] = &scs[i]->pd;
        }

        /* Add subdomains */
        for (i = 0; i < num; i++) {
                if (!scs[i])
                        continue;
                if (scs[i]->parent)
                        pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd);
        }

        return of_genpd_add_provider_onecell(dev->of_node, data);
}

void gdsc_unregister(struct gdsc_desc *desc)
{
        int i;
        struct device *dev = desc->dev;
        struct gdsc **scs = desc->scs;
        size_t num = desc->num;

        /* Remove subdomains */
        for (i = 0; i < num; i++) {
                if (!scs[i])
                        continue;
                if (scs[i]->parent)
                        pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd);
        }
        of_genpd_del_provider(dev->of_node);
}
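/*
 * Usage sketch (illustrative, modeled on how the qcom clock-controller core
 * wires this up): a controller driver fills a struct gdsc_desc with its
 * array of gdscs and hands it to gdsc_register() at probe time, calling
 * gdsc_unregister() on teardown. "my_gdscs" is a hypothetical array:
 *
 *      static struct gdsc *my_gdscs[] = { &example_gdsc };
 *
 *      struct gdsc_desc *scd = devm_kzalloc(dev, sizeof(*scd), GFP_KERNEL);
 *      if (!scd)
 *              return -ENOMEM;
 *      scd->dev = dev;
 *      scd->scs = my_gdscs;
 *      scd->num = ARRAY_SIZE(my_gdscs);
 *      ret = gdsc_register(scd, rcdev, regmap);
 */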
/*
 * On SDM845+ the GPU GX domain is *almost* entirely controlled by the GMU
 * running in the CX domain, so the CPU doesn't need to know anything about the
 * GX domain EXCEPT....
 *
 * Hardware constraints dictate that the GX be powered down before the CX. If
 * the GMU crashes it could leave the GX on. In order to successfully bring
 * back the device the CPU needs to disable the GX headswitch. There being no
 * sane way to reach in and touch that register from deep inside the GPU
 * driver, we need to set up the infrastructure so that the GPU driver can
 * ensure that the GX is off during this super special case. We do this by
 * defining a GX gdsc with a dummy enable function and a "default" disable
 * function.
 *
 * This allows us to attach with genpd_dev_pm_attach_by_name() in the GPU
 * driver. During power up, nothing will happen from the CPU (and the GMU will
 * power up normally), but during power down this will ensure that the GX
 * domain is *really* off - this gives us a semi-standard way of doing what we
 * need.
 */
int gdsc_gx_do_nothing_enable(struct generic_pm_domain *domain)
{
        /* Do nothing but give genpd the impression that we were successful */
        return 0;
}
EXPORT_SYMBOL_GPL(gdsc_gx_do_nothing_enable);
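/*
 * Illustrative only (the register offset is invented for the example): a GX
 * gdsc wires the dummy enable in through the genpd power_on hook and leaves
 * power_off at the default installed by gdsc_init(), along the lines of the
 * SDM845 GPU clock controller driver:
 *
 *      static struct gdsc gpu_gx_gdsc = {
 *              .gdscr = 0x100c,
 *              .pd = {
 *                      .name = "gpu_gx_gdsc",
 *                      .power_on = gdsc_gx_do_nothing_enable,
 *              },
 *              .pwrsts = PWRSTS_OFF_ON,
 *      };
 */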