/*
 * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Shamelessly ripped off from ChromeOS's gk20a/clk_pllg.c
 *
 */
#include "priv.h"
#include "gk20a.h"

#include <core/tegra.h>
#include <subdev/timer.h>

#define KHZ (1000)
#define MHZ (KHZ * 1000)

#define MASK(w)	((1 << (w)) - 1)

#define GPCPLL_CFG		(SYS_GPCPLL_CFG_BASE + 0)
#define GPCPLL_CFG_ENABLE	BIT(0)
#define GPCPLL_CFG_IDDQ		BIT(1)
#define GPCPLL_CFG_LOCK_DET_OFF	BIT(4)
#define GPCPLL_CFG_LOCK		BIT(17)

#define GPCPLL_COEFF		(SYS_GPCPLL_CFG_BASE + 4)
#define GPCPLL_COEFF_M_SHIFT	0
#define GPCPLL_COEFF_M_WIDTH	8
#define GPCPLL_COEFF_N_SHIFT	8
#define GPCPLL_COEFF_N_WIDTH	8
#define GPCPLL_COEFF_P_SHIFT	16
#define GPCPLL_COEFF_P_WIDTH	6

#define GPCPLL_CFG2			(SYS_GPCPLL_CFG_BASE + 0xc)
#define GPCPLL_CFG2_SETUP2_SHIFT	16
#define GPCPLL_CFG2_PLL_STEPA_SHIFT	24

#define GPCPLL_CFG3			(SYS_GPCPLL_CFG_BASE + 0x18)
#define GPCPLL_CFG3_PLL_STEPB_SHIFT	16

#define GPC_BCASE_GPCPLL_CFG_BASE	0x00132800
#define GPCPLL_NDIV_SLOWDOWN		(SYS_GPCPLL_CFG_BASE + 0x1c)
#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT	0
#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT	8
#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT	16
#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT	22
#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT	31

#define SEL_VCO				(SYS_GPCPLL_CFG_BASE + 0x100)
#define SEL_VCO_GPC2CLK_OUT_SHIFT	0

#define GPC2CLK_OUT			(SYS_GPCPLL_CFG_BASE + 0x250)
#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH	1
#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT	31
#define GPC2CLK_OUT_SDIV14_INDIV4_MODE	1
#define GPC2CLK_OUT_VCODIV_WIDTH	6
#define GPC2CLK_OUT_VCODIV_SHIFT	8
#define GPC2CLK_OUT_VCODIV1		0
#define GPC2CLK_OUT_VCODIV_MASK		(MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \
					GPC2CLK_OUT_VCODIV_SHIFT)
#define GPC2CLK_OUT_BYPDIV_WIDTH	6
#define GPC2CLK_OUT_BYPDIV_SHIFT	0
#define GPC2CLK_OUT_BYPDIV31		0x3c
#define GPC2CLK_OUT_INIT_MASK	((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \
		GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\
		| (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\
		| (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT))
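/*
 * Value written at init: SDIV14 divider mode, VCO path divider 1:1, and
 * the bypass path divider at its /31 setting (0x3c), per the macro names.
 */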
#define GPC2CLK_OUT_INIT_VAL	((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \
		GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \
		| (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \
		| (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT))

#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG	(GPC_BCASE_GPCPLL_CFG_BASE + 0xa0)
#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT	24
#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \
	(0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT)

static const u8 _pl_to_div[] = {
/* PL:   0, 1, 2, 3, 4, 5, 6,  7,  8,  9, 10, 11, 12, 13, 14 */
/* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32,
};

static u32 pl_to_div(u32 pl)
{
	if (pl >= ARRAY_SIZE(_pl_to_div))
		return 1;

	return _pl_to_div[pl];
}

static u32 div_to_pl(u32 div)
{
	u32 pl;

	for (pl = 0; pl < ARRAY_SIZE(_pl_to_div) - 1; pl++) {
		if (_pl_to_div[pl] >= div)
			return pl;
	}

	return ARRAY_SIZE(_pl_to_div) - 1;
}

static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
	.min_vco = 1000000, .max_vco = 2064000,
	.min_u = 12000, .max_u = 38000,
	.min_m = 1, .max_m = 255,
	.min_n = 8, .max_n = 255,
	.min_pl = 1, .max_pl = 32,
};

static void
gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 val;

	val = nvkm_rd32(device, GPCPLL_COEFF);
	pll->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
	pll->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
	pll->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
}

static u32
gk20a_pllg_calc_rate(struct gk20a_clk *clk)
{
	u32 rate;
	u32 divider;

	rate = clk->parent_rate * clk->pll.n;
	divider = clk->pll.m * clk->pl_to_div(clk->pll.pl);

	return rate / divider / 2;
}

static int
gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	u32 target_clk_f, ref_clk_f, target_freq;
	u32 min_vco_f, max_vco_f;
	u32 low_pl, high_pl, best_pl;
	u32 target_vco_f;
	u32 best_m, best_n;
	u32 best_delta = ~0;
	u32 pl;

	target_clk_f = rate * 2 / KHZ;
	ref_clk_f = clk->parent_rate / KHZ;

	target_vco_f = target_clk_f + target_clk_f / 50;
	max_vco_f = max(clk->params->max_vco, target_vco_f);
	min_vco_f = clk->params->min_vco;
	best_m = clk->params->max_m;
	best_n = clk->params->min_n;
	best_pl = clk->params->min_pl;

	/* min_pl <= high_pl <= max_pl */
	high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
	high_pl = min(high_pl, clk->params->max_pl);
	high_pl = max(high_pl, clk->params->min_pl);
	high_pl = clk->div_to_pl(high_pl);

	/* min_pl <= low_pl <= max_pl */
	low_pl = min_vco_f / target_vco_f;
	low_pl = min(low_pl, clk->params->max_pl);
	low_pl = max(low_pl, clk->params->min_pl);
	low_pl = clk->div_to_pl(low_pl);

	nvkm_debug(subdev, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
		   clk->pl_to_div(low_pl), high_pl, clk->pl_to_div(high_pl));

	/* Select lowest possible VCO */
	for (pl = low_pl; pl <= high_pl; pl++) {
		u32 m, n, n2;

		target_vco_f = target_clk_f * clk->pl_to_div(pl);

		for (m = clk->params->min_m; m <= clk->params->max_m; m++) {
			u32 u_f = ref_clk_f / m;

			if (u_f < clk->params->min_u)
				break;
			if (u_f > clk->params->max_u)
				continue;

			n = (target_vco_f * m) / ref_clk_f;
			n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f;
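			/*
			 * n (floor) and n2 (ceiling) bracket the ideal
			 * feedback divider for this m; the loop below tries
			 * both candidates.
			 */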
			if (n > clk->params->max_n)
				break;

			for (; n <= n2; n++) {
				u32 vco_f;

				if (n < clk->params->min_n)
					continue;
				if (n > clk->params->max_n)
					break;

				vco_f = ref_clk_f * n / m;

				if (vco_f >= min_vco_f && vco_f <= max_vco_f) {
					u32 delta, lwv;

					lwv = (vco_f + (clk->pl_to_div(pl) / 2))
						/ clk->pl_to_div(pl);
					delta = abs(lwv - target_clk_f);

					if (delta < best_delta) {
						best_delta = delta;
						best_m = m;
						best_n = n;
						best_pl = pl;

						if (best_delta == 0)
							goto found_match;
					}
				}
			}
		}
	}

found_match:
	WARN_ON(best_delta == ~0);

	if (best_delta != 0)
		nvkm_debug(subdev,
			   "no best match for target @ %dMHz on gpc_pll",
			   target_clk_f / KHZ);

	clk->pll.m = best_m;
	clk->pll.n = best_n;
	clk->pll.pl = best_pl;

	target_freq = gk20a_pllg_calc_rate(clk);

	nvkm_debug(subdev,
		   "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
		   target_freq / MHZ, clk->pll.m, clk->pll.n, clk->pll.pl,
		   clk->pl_to_div(clk->pll.pl));
	return 0;
}

static int
gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 val;
	int ret = 0;

	/* get old coefficients */
	val = nvkm_rd32(device, GPCPLL_COEFF);
	/* do nothing if NDIV is the same */
	if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH)))
		return 0;

	/* pll slowdown mode */
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));

	/* new ndiv ready for ramp */
	val = nvkm_rd32(device, GPCPLL_COEFF);
	val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT);
	val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
	udelay(1);
	nvkm_wr32(device, GPCPLL_COEFF, val);

	/* dynamic ramp to new ndiv */
	udelay(1);
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT),
		BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT));

	/* wait for ramping to complete */
	if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG,
		GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK,
		GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0)
		ret = -ETIMEDOUT;

	/* exit slowdown mode */
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
		BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
	nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);

	return ret;
}

static void
gk20a_pllg_enable(struct gk20a_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;

	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
	nvkm_rd32(device, GPCPLL_CFG);
}

static void
gk20a_pllg_disable(struct gk20a_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;

	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
	nvkm_rd32(device, GPCPLL_CFG);
}
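/*
 * Program the PLL with the precomputed coefficients in clk->pll. If
 * allow_slide is set and only NDIV changes, use the glitchless dynamic
 * ramp; otherwise reprogram the PLL under bypass and wait for it to lock.
 */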
static int
_gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 val, cfg;
	struct gk20a_pll old_pll;
	u32 n_lo;

	/* get old coefficients */
	gk20a_pllg_read_mnp(clk, &old_pll);

	/* do NDIV slide if there is no change in M and PL */
	cfg = nvkm_rd32(device, GPCPLL_CFG);
	if (allow_slide && clk->pll.m == old_pll.m &&
	    clk->pll.pl == old_pll.pl && (cfg & GPCPLL_CFG_ENABLE)) {
		return gk20a_pllg_slide(clk, clk->pll.n);
	}

	/* slide down to NDIV_LO */
	if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) {
		int ret;

		n_lo = DIV_ROUND_UP(old_pll.m * clk->params->min_vco,
				    clk->parent_rate / KHZ);
		ret = gk20a_pllg_slide(clk, n_lo);

		if (ret)
			return ret;
	}

	/* split FO-to-bypass jump in halves by setting out divider 1:2 */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  0x2 << GPC2CLK_OUT_VCODIV_SHIFT);

	/* put PLL in bypass before programming it */
	val = nvkm_rd32(device, SEL_VCO);
	val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
	udelay(2);
	nvkm_wr32(device, SEL_VCO, val);

	/* get out from IDDQ */
	val = nvkm_rd32(device, GPCPLL_CFG);
	if (val & GPCPLL_CFG_IDDQ) {
		val &= ~GPCPLL_CFG_IDDQ;
		nvkm_wr32(device, GPCPLL_CFG, val);
		nvkm_rd32(device, GPCPLL_CFG);
		udelay(2);
	}

	gk20a_pllg_disable(clk);

	nvkm_debug(subdev, "%s: m=%d n=%d pl=%d\n", __func__,
		   clk->pll.m, clk->pll.n, clk->pll.pl);

	n_lo = DIV_ROUND_UP(clk->pll.m * clk->params->min_vco,
			    clk->parent_rate / KHZ);
	val = clk->pll.m << GPCPLL_COEFF_M_SHIFT;
	val |= (allow_slide ? n_lo : clk->pll.n) << GPCPLL_COEFF_N_SHIFT;
	val |= clk->pll.pl << GPCPLL_COEFF_P_SHIFT;
	nvkm_wr32(device, GPCPLL_COEFF, val);

	gk20a_pllg_enable(clk);

	val = nvkm_rd32(device, GPCPLL_CFG);
	if (val & GPCPLL_CFG_LOCK_DET_OFF) {
		val &= ~GPCPLL_CFG_LOCK_DET_OFF;
		nvkm_wr32(device, GPCPLL_CFG, val);
	}

	if (nvkm_usec(device, 300,
		if (nvkm_rd32(device, GPCPLL_CFG) & GPCPLL_CFG_LOCK)
			break;
	) < 0)
		return -ETIMEDOUT;

	/* switch to VCO mode */
	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
		  BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));

	/* restore out divider 1:1 */
	val = nvkm_rd32(device, GPC2CLK_OUT);
	if ((val & GPC2CLK_OUT_VCODIV_MASK) !=
	    (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT)) {
		val &= ~GPC2CLK_OUT_VCODIV_MASK;
		val |= GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT;
		udelay(2);
		nvkm_wr32(device, GPC2CLK_OUT, val);
		/* Intentional 2nd write to ensure linear divider operation */
		nvkm_wr32(device, GPC2CLK_OUT, val);
		nvkm_rd32(device, GPC2CLK_OUT);
	}

	/* slide up to new NDIV */
	return allow_slide ? gk20a_pllg_slide(clk, clk->pll.n) : 0;
}
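/* try programming with NDIV sliding allowed, and retry without it on failure */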
static int
gk20a_pllg_program_mnp(struct gk20a_clk *clk)
{
	int err;

	err = _gk20a_pllg_program_mnp(clk, true);
	if (err)
		err = _gk20a_pllg_program_mnp(clk, false);

	return err;
}

static struct nvkm_pstate
gk20a_pstates[] = {
	{
		.base = {
			.domain[nv_clk_src_gpc] = 72000,
			.voltage = 0,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 108000,
			.voltage = 1,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 180000,
			.voltage = 2,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 252000,
			.voltage = 3,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 324000,
			.voltage = 4,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 396000,
			.voltage = 5,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 468000,
			.voltage = 6,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 540000,
			.voltage = 7,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 612000,
			.voltage = 8,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 648000,
			.voltage = 9,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 684000,
			.voltage = 10,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 708000,
			.voltage = 11,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 756000,
			.voltage = 12,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 804000,
			.voltage = 13,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 852000,
			.voltage = 14,
		},
	},
};

int
gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct gk20a_clk *clk = gk20a_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_gpc:
		gk20a_pllg_read_mnp(clk, &clk->pll);
		return gk20a_pllg_calc_rate(clk) / GK20A_CLK_GPC_MDIV;
	default:
		nvkm_error(subdev, "invalid clock source %d\n", src);
		return -EINVAL;
	}
}

int
gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
	struct gk20a_clk *clk = gk20a_clk(base);

	return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] *
					 GK20A_CLK_GPC_MDIV);
}

int
gk20a_clk_prog(struct nvkm_clk *base)
{
	struct gk20a_clk *clk = gk20a_clk(base);

	return gk20a_pllg_program_mnp(clk);
}

void
gk20a_clk_tidy(struct nvkm_clk *base)
{
}

int
gk20a_clk_setup_slide(struct gk20a_clk *clk)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 step_a, step_b;

	switch (clk->parent_rate) {
	case 12000000:
	case 12800000:
	case 13000000:
		step_a = 0x2b;
		step_b = 0x0b;
		break;
	case 19200000:
		step_a = 0x12;
		step_b = 0x08;
		break;
	case 38400000:
		step_a = 0x04;
		step_b = 0x05;
		break;
	default:
		nvkm_error(subdev, "invalid parent clock rate %u kHz",
			   clk->parent_rate / KHZ);
		return -EINVAL;
	}

	nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
		step_a << GPCPLL_CFG2_PLL_STEPA_SHIFT);
	nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
		step_b << GPCPLL_CFG3_PLL_STEPB_SHIFT);

	return 0;
}
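/* slide the PLL down to the minimum VCO rate, then disable it under bypass */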
void
gk20a_clk_fini(struct nvkm_clk *base)
{
	struct nvkm_device *device = base->subdev.device;
	struct gk20a_clk *clk = gk20a_clk(base);
	u32 val;

	/* slide to VCO min */
	val = nvkm_rd32(device, GPCPLL_CFG);
	if (val & GPCPLL_CFG_ENABLE) {
		struct gk20a_pll pll;
		u32 n_lo;

		gk20a_pllg_read_mnp(clk, &pll);
		n_lo = DIV_ROUND_UP(pll.m * clk->params->min_vco,
				    clk->parent_rate / KHZ);
		gk20a_pllg_slide(clk, n_lo);
	}

	/* put PLL in bypass before disabling it */
	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);

	gk20a_pllg_disable(clk);
}

static int
gk20a_clk_init(struct nvkm_clk *base)
{
	struct gk20a_clk *clk = gk20a_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	int ret;

	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
		  GPC2CLK_OUT_INIT_VAL);

	ret = gk20a_clk_setup_slide(clk);
	if (ret)
		return ret;

	/* Start with lowest frequency */
	base->func->calc(base, &base->func->pstates[0].base);
	ret = base->func->prog(&clk->base);
	if (ret) {
		nvkm_error(subdev, "cannot initialize clock\n");
		return ret;
	}

	return 0;
}

static const struct nvkm_clk_func
gk20a_clk = {
	.init = gk20a_clk_init,
	.fini = gk20a_clk_fini,
	.read = gk20a_clk_read,
	.calc = gk20a_clk_calc,
	.prog = gk20a_clk_prog,
	.tidy = gk20a_clk_tidy,
	.pstates = gk20a_pstates,
	.nr_pstates = ARRAY_SIZE(gk20a_pstates),
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
		{ nv_clk_src_max }
	}
};

int
_gk20a_clk_ctor(struct nvkm_device *device, int index,
		const struct nvkm_clk_func *func,
		const struct gk20a_clk_pllg_params *params,
		struct gk20a_clk *clk)
{
	struct nvkm_device_tegra *tdev = device->func->tegra(device);
	int ret;
	int i;

	/* Finish initializing the pstates */
	for (i = 0; i < func->nr_pstates; i++) {
		INIT_LIST_HEAD(&func->pstates[i].list);
		func->pstates[i].pstate = i + 1;
	}

	clk->params = params;
	clk->parent_rate = clk_get_rate(tdev->clk);

	ret = nvkm_clk_ctor(func, device, index, true, &clk->base);
	if (ret)
		return ret;

	nvkm_debug(&clk->base.subdev, "parent clock rate: %d kHz\n",
		   clk->parent_rate / KHZ);

	return 0;
}

int
gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
	struct gk20a_clk *clk;
	int ret;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return -ENOMEM;
	*pclk = &clk->base;

	ret = _gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params,
			      clk);

	clk->pl_to_div = pl_to_div;
	clk->div_to_pl = div_to_pl;

	return ret;
}