1 /* 2 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
 *
 * Shamelessly ripped off from ChromeOS's gk20a/clk_pllg.c
 *
 */

/* Recover the gk20a_clk wrapper from the embedded nvkm_clk base pointer. */
#define gk20a_clk(p) container_of((p), struct gk20a_clk, base)
#include "priv.h"

#include <core/tegra.h>
#include <subdev/timer.h>

#define MHZ (1000 * 1000)

/* Bitmask of the low w bits. */
#define MASK(w)	((1 << w) - 1)

/*
 * Register layout for the GPC PLL (system-wide and GPC-broadcast blocks).
 * NOTE(review): "GPC_BCASE_..." looks like a typo for "GPC_BCAST_..."; it is
 * used consistently below, so it is harmless, but worth renaming someday.
 */
#define SYS_GPCPLL_CFG_BASE 0x00137000
#define GPC_BCASE_GPCPLL_CFG_BASE 0x00132800

#define GPCPLL_CFG (SYS_GPCPLL_CFG_BASE + 0)
#define GPCPLL_CFG_ENABLE BIT(0)
#define GPCPLL_CFG_IDDQ BIT(1)
#define GPCPLL_CFG_LOCK_DET_OFF BIT(4)
#define GPCPLL_CFG_LOCK BIT(17)

/* M/N/PL divider coefficients, packed into one register. */
#define GPCPLL_COEFF (SYS_GPCPLL_CFG_BASE + 4)
#define GPCPLL_COEFF_M_SHIFT 0
#define GPCPLL_COEFF_M_WIDTH 8
#define GPCPLL_COEFF_N_SHIFT 8
#define GPCPLL_COEFF_N_WIDTH 8
#define GPCPLL_COEFF_P_SHIFT 16
#define GPCPLL_COEFF_P_WIDTH 6

#define GPCPLL_CFG2 (SYS_GPCPLL_CFG_BASE + 0xc)
#define GPCPLL_CFG2_SETUP2_SHIFT 16
#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24

#define GPCPLL_CFG3 (SYS_GPCPLL_CFG_BASE + 0x18)
#define GPCPLL_CFG3_PLL_STEPB_SHIFT 16

/* Controls for the dynamic NDIV ramp used by gk20a_pllg_slide(). */
#define GPCPLL_NDIV_SLOWDOWN (SYS_GPCPLL_CFG_BASE + 0x1c)
#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT 0
#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT 8
#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT 16
#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT 22
#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT 31

/* Selects whether gpc2clk is fed from the PLL VCO or the bypass path. */
#define SEL_VCO (SYS_GPCPLL_CFG_BASE + 0x100)
#define SEL_VCO_GPC2CLK_OUT_SHIFT 0

#define GPC2CLK_OUT (SYS_GPCPLL_CFG_BASE + 0x250)
#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH 1
#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT 31
#define GPC2CLK_OUT_SDIV14_INDIV4_MODE 1
#define GPC2CLK_OUT_VCODIV_WIDTH 6
#define GPC2CLK_OUT_VCODIV_SHIFT 8
#define GPC2CLK_OUT_VCODIV1 0
#define GPC2CLK_OUT_VCODIV_MASK (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \
		GPC2CLK_OUT_VCODIV_SHIFT)
#define GPC2CLK_OUT_BYPDIV_WIDTH 6
#define GPC2CLK_OUT_BYPDIV_SHIFT 0
#define GPC2CLK_OUT_BYPDIV31 0x3c
#define GPC2CLK_OUT_INIT_MASK ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \
		GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\
		| (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\
		| (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT))
#define GPC2CLK_OUT_INIT_VAL ((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \
		GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \
		| (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \
		| (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT))

/* Status bit polled to detect completion of the dynamic NDIV ramp. */
#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG (GPC_BCASE_GPCPLL_CFG_BASE + 0xa0)
#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT 24
#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \
	(0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT)

/*
 * PL field value -> actual post divider.
 * NOTE(review): the table is not monotonic (PL 10/11 map to 12/16, repeating
 * earlier entries) — presumably this matches the hardware encoding inherited
 * from the ChromeOS driver; confirm against the GK20A TRM before "fixing" it.
 */
static const u8 pl_to_div[] = {
/* PL:   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */
/* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32,
};

/* All frequencies in Mhz */
struct gk20a_clk_pllg_params {
	u32 min_vco, max_vco;	/* VCO output frequency limits */
	u32 min_u, max_u;	/* update-rate (ref/M) frequency limits */
	u32 min_m, max_m;	/* input divider limits */
	u32 min_n, max_n;	/* feedback divider limits */
	u32 min_pl, max_pl;	/* post divider limits (divider values, not PL codes) */
};

static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
	.min_vco = 1000, .max_vco = 2064,
	.min_u = 12, .max_u = 38,
	.min_m = 1, .max_m = 255,
	.min_n = 8, .max_n = 255,
	.min_pl = 1, .max_pl = 32,
};

struct gk20a_clk {
	struct nvkm_clk base;
	const struct gk20a_clk_pllg_params *params;
	/* Cached M/N/PL coefficients, either read from or to be programmed
	 * into GPCPLL_COEFF. */
	u32 m, n, pl;
	/* Reference clock rate from the Tegra clock framework; divided by
	 * MHZ below, so this is in Hz. */
	u32 parent_rate;
};

/*
 * Read the current M/N/PL dividers from GPCPLL_COEFF into the cached
 * clk->m/n/pl fields.
 */
static void
gk20a_pllg_read_mnp(struct gk20a_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 val;

	val = nvkm_rd32(device, GPCPLL_COEFF);
	clk->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
	clk->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
	clk->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
}

/*
 * Compute the output rate (in Hz) implied by the cached M/N/PL coefficients:
 * ref * N / (M * div(PL)), halved because the PLL generates gpc2clk (twice
 * the GPC clock).
 */
static u32
gk20a_pllg_calc_rate(struct gk20a_clk *clk)
{
	u32 rate;
	u32 divider;

	rate = clk->parent_rate * clk->n;
	divider = clk->m * pl_to_div[clk->pl];

	return rate / divider / 2;
}

/*
 * Search for the M/N/PL combination whose output best matches @rate (in Hz),
 * and store it in clk->m/n/pl. Works in MHz internally; target is gpc2clk
 * (2x the requested GPC rate). Prefers the lowest PL (hence lowest VCO) that
 * satisfies the VCO and update-rate constraints, and exits early on an exact
 * match. Always returns 0; an inexact match is only logged.
 */
static int
gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	u32 target_clk_f, ref_clk_f, target_freq;
	u32 min_vco_f, max_vco_f;
	u32 low_pl, high_pl, best_pl;
	u32 target_vco_f, vco_f;
	u32 best_m, best_n;
	u32 u_f;
	u32 m, n, n2;
	u32 delta, lwv, best_delta = ~0;
	u32 pl;

	target_clk_f = rate * 2 / MHZ;
	ref_clk_f = clk->parent_rate / MHZ;

	max_vco_f = clk->params->max_vco;
	min_vco_f = clk->params->min_vco;
	best_m = clk->params->max_m;
	best_n = clk->params->min_n;
	best_pl = clk->params->min_pl;

	/* Aim the VCO 2% above target to leave headroom for rounding. */
	target_vco_f = target_clk_f + target_clk_f / 50;
	if (max_vco_f < target_vco_f)
		max_vco_f = target_vco_f;

	/* min_pl <= high_pl <= max_pl */
	high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
	high_pl = min(high_pl, clk->params->max_pl);
	high_pl = max(high_pl, clk->params->min_pl);

	/* min_pl <= low_pl <= max_pl */
	low_pl = min_vco_f / target_vco_f;
	low_pl = min(low_pl, clk->params->max_pl);
	low_pl = max(low_pl, clk->params->min_pl);

	/* Find Indices of high_pl and low_pl */
	for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) {
		if (pl_to_div[pl] >= low_pl) {
			low_pl = pl;
			break;
		}
	}
	for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) {
		if (pl_to_div[pl] >= high_pl) {
			high_pl = pl;
			break;
		}
	}

	nvkm_debug(subdev, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
		   pl_to_div[low_pl], high_pl, pl_to_div[high_pl]);

	/* Select lowest possible VCO */
	for (pl = low_pl; pl <= high_pl; pl++) {
		target_vco_f = target_clk_f * pl_to_div[pl];
		for (m = clk->params->min_m; m <= clk->params->max_m; m++) {
			/* Update rate: PLL input after the M divider. */
			u_f = ref_clk_f / m;

			/* u_f only shrinks as m grows, so stop here. */
			if (u_f < clk->params->min_u)
				break;
			if (u_f > clk->params->max_u)
				continue;

			/* n..n2 brackets the ideal (fractional) N value. */
			n = (target_vco_f * m) / ref_clk_f;
			n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f;

			if (n > clk->params->max_n)
				break;

			for (; n <= n2; n++) {
				if (n < clk->params->min_n)
					continue;
				if (n > clk->params->max_n)
					break;

				vco_f = ref_clk_f * n / m;

				if (vco_f >= min_vco_f && vco_f <= max_vco_f) {
					/* Rounded output freq for this PL. */
					lwv = (vco_f + (pl_to_div[pl] / 2))
						/ pl_to_div[pl];
					delta = abs(lwv - target_clk_f);

					if (delta < best_delta) {
						best_delta = delta;
						best_m = m;
						best_n = n;
						best_pl = pl;

						if (best_delta == 0)
							goto found_match;
					}
				}
			}
		}
	}

	/* Also reached by normal loop exhaustion with the closest match. */
found_match:
	WARN_ON(best_delta == ~0);

	if (best_delta != 0)
		nvkm_debug(subdev,
			   "no best match for target @ %dMHz on gpc_pll",
			   target_clk_f);

	clk->m = best_m;
	clk->n = best_n;
	clk->pl = best_pl;

	target_freq = gk20a_pllg_calc_rate(clk) / MHZ;

	nvkm_debug(subdev,
		   "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
		   target_freq, clk->m, clk->n, clk->pl, pl_to_div[clk->pl]);
	return 0;
}

/*
 * Glitchlessly change NDIV to @n while the PLL stays enabled, using the
 * hardware dynamic-ramp (slowdown) mechanism. Returns 0 on success, or
 * -ETIMEDOUT if the ramp-done flag never asserts within ~500us.
 */
static int
gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 val;
	int ramp_timeout;

	/* get old coefficients */
	val = nvkm_rd32(device, GPCPLL_COEFF);
	/* do nothing if NDIV is the same */
	if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH)))
		return 0;

	/* setup */
	nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
		0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT);
	nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
		0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT);

	/* pll slowdown mode */
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));

	/* new ndiv ready for ramp */
	val = nvkm_rd32(device, GPCPLL_COEFF);
	val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT);
	val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
	udelay(1);
	nvkm_wr32(device, GPCPLL_COEFF, val);

	/* dynamic ramp to new ndiv */
	val = nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
	val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT;
	udelay(1);
	nvkm_wr32(device, GPCPLL_NDIV_SLOWDOWN, val);

	/* Poll the broadcast debug register for ramp completion. */
	for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) {
		udelay(1);
		val = nvkm_rd32(device, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
		if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK)
			break;
	}

	/* exit slowdown mode */
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
		BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
	/* read back — presumably a posting read to flush the write */
	nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);

	/* ramp_timeout reaches 0 only if the loop never hit its break */
	if (ramp_timeout <= 0) {
		nvkm_error(subdev, "gpcpll dynamic ramp timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/* Set the PLL enable bit; the trailing read flushes the write. */
static void
_gk20a_pllg_enable(struct gk20a_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
	nvkm_rd32(device, GPCPLL_CFG);
}

/* Clear the PLL enable bit; the trailing read flushes the write. */
static void
_gk20a_pllg_disable(struct gk20a_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
	nvkm_rd32(device, GPCPLL_CFG);
}

/*
 * Program the PLL with the cached clk->m/n/pl coefficients.
 *
 * With @allow_slide, attempts the gentle path: an NDIV-only slide if M/PL
 * are unchanged, otherwise slide down to NDIV_LO before reprogramming under
 * bypass, then slide back up to the target NDIV afterwards. Without it,
 * programs the final coefficients directly (used as the fallback when the
 * sliding path fails). Returns 0 or a negative error (-ETIMEDOUT if the
 * PLL fails to lock within 300us).
 */
static int
_gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 val, cfg;
	u32 m_old, pl_old, n_lo;

	/* get old coefficients */
	val = nvkm_rd32(device, GPCPLL_COEFF);
	m_old = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
	pl_old = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);

	/* do NDIV slide if there is no change in M and PL */
	cfg = nvkm_rd32(device, GPCPLL_CFG);
	if (allow_slide && clk->m == m_old && clk->pl == pl_old &&
	    (cfg & GPCPLL_CFG_ENABLE)) {
		return gk20a_pllg_slide(clk, clk->n);
	}

	/* slide down to NDIV_LO */
	n_lo = DIV_ROUND_UP(m_old * clk->params->min_vco,
			    clk->parent_rate / MHZ);
	if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) {
		int ret = gk20a_pllg_slide(clk, n_lo);

		if (ret)
			return ret;
	}

	/* split FO-to-bypass jump in halfs by setting out divider 1:2 */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  0x2 << GPC2CLK_OUT_VCODIV_SHIFT);

	/* put PLL in bypass before programming it */
	val = nvkm_rd32(device, SEL_VCO);
	val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
	udelay(2);
	nvkm_wr32(device, SEL_VCO, val);

	/* get out from IDDQ */
	val = nvkm_rd32(device, GPCPLL_CFG);
	if (val & GPCPLL_CFG_IDDQ) {
		val &= ~GPCPLL_CFG_IDDQ;
		nvkm_wr32(device, GPCPLL_CFG, val);
		nvkm_rd32(device, GPCPLL_CFG);
		udelay(2);
	}

	_gk20a_pllg_disable(clk);

	nvkm_debug(subdev, "%s: m=%d n=%d pl=%d\n", __func__,
		   clk->m, clk->n, clk->pl);

	/* When sliding, start at NDIV_LO and ramp up to clk->n afterwards. */
	n_lo = DIV_ROUND_UP(clk->m * clk->params->min_vco,
			    clk->parent_rate / MHZ);
	val = clk->m << GPCPLL_COEFF_M_SHIFT;
	val |= (allow_slide ? n_lo : clk->n) << GPCPLL_COEFF_N_SHIFT;
	val |= clk->pl << GPCPLL_COEFF_P_SHIFT;
	nvkm_wr32(device, GPCPLL_COEFF, val);

	_gk20a_pllg_enable(clk);

	/* make sure lock detection is enabled before polling for lock */
	val = nvkm_rd32(device, GPCPLL_CFG);
	if (val & GPCPLL_CFG_LOCK_DET_OFF) {
		val &= ~GPCPLL_CFG_LOCK_DET_OFF;
		nvkm_wr32(device, GPCPLL_CFG, val);
	}

	/* wait up to 300us for the PLL to lock */
	if (nvkm_usec(device, 300,
		if (nvkm_rd32(device, GPCPLL_CFG) & GPCPLL_CFG_LOCK)
			break;
	) < 0)
		return -ETIMEDOUT;

	/* switch to VCO mode */
	nvkm_mask(device, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));

	/* restore out divider 1:1 */
	val = nvkm_rd32(device, GPC2CLK_OUT);
	val &= ~GPC2CLK_OUT_VCODIV_MASK;
	udelay(2);
	nvkm_wr32(device, GPC2CLK_OUT, val);

	/* slide up to new NDIV */
	return allow_slide ? gk20a_pllg_slide(clk, clk->n) : 0;
}

/*
 * Program the PLL, preferring the glitchless sliding path and falling back
 * to a direct (non-sliding) reprogram if that fails.
 */
static int
gk20a_pllg_program_mnp(struct gk20a_clk *clk)
{
	int err;

	err = _gk20a_pllg_program_mnp(clk, true);
	if (err)
		err = _gk20a_pllg_program_mnp(clk, false);

	return err;
}

/*
 * Shut the PLL down cleanly: slide to the minimum VCO first (if running),
 * switch the output to bypass, then disable the PLL.
 */
static void
gk20a_pllg_disable(struct gk20a_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 val;

	/* slide to VCO min */
	val = nvkm_rd32(device, GPCPLL_CFG);
	if (val & GPCPLL_CFG_ENABLE) {
		u32 coeff, m, n_lo;

		coeff = nvkm_rd32(device, GPCPLL_COEFF);
		m = (coeff >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
		n_lo = DIV_ROUND_UP(m * clk->params->min_vco,
				    clk->parent_rate / MHZ);
		gk20a_pllg_slide(clk, n_lo);
	}

	/* put PLL in bypass before disabling it */
	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);

	_gk20a_pllg_disable(clk);
}

#define GK20A_CLK_GPC_MDIV 1000

/*
 * Supported performance states. Domain values are multiplied by
 * GK20A_CLK_GPC_MDIV (1000) in gk20a_clk_calc(), so they are in kHz
 * (72 MHz .. 852 MHz). The voltage field is a monotonically increasing
 * level index, completed at runtime by gk20a_clk_new().
 */
static struct nvkm_pstate
gk20a_pstates[] = {
	{
		.base = {
			.domain[nv_clk_src_gpc] = 72000,
			.voltage = 0,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 108000,
			.voltage = 1,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 180000,
			.voltage = 2,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 252000,
			.voltage = 3,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 324000,
			.voltage = 4,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 396000,
			.voltage = 5,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 468000,
			.voltage = 6,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 540000,
			.voltage = 7,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 612000,
			.voltage = 8,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 648000,
			.voltage = 9,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 684000,
			.voltage = 10,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 708000,
			.voltage = 11,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 756000,
			.voltage = 12,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 804000,
			.voltage = 13,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 852000,
			.voltage = 14,
		},
	},
};

/*
 * nvkm_clk .read: report the current rate of clock source @src.
 * For the GPC domain, re-reads the live M/N/PL from hardware first.
 */
static int
gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct gk20a_clk *clk = gk20a_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_gpc:
		gk20a_pllg_read_mnp(clk);
		return gk20a_pllg_calc_rate(clk) / GK20A_CLK_GPC_MDIV;
	default:
		nvkm_error(subdev, "invalid clock source %d\n", src);
		return -EINVAL;
	}
}

/*
 * nvkm_clk .calc: compute (but do not program) the M/N/PL coefficients for
 * the requested cstate's GPC domain rate.
 */
static int
gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
	struct gk20a_clk *clk = gk20a_clk(base);

	return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] *
					 GK20A_CLK_GPC_MDIV);
}

/* nvkm_clk .prog: commit the previously calculated coefficients to the PLL. */
static int
gk20a_clk_prog(struct nvkm_clk *base)
{
	struct gk20a_clk *clk = gk20a_clk(base);

	return gk20a_pllg_program_mnp(clk);
}

/* nvkm_clk .tidy: nothing to clean up after a clock change on this chip. */
static void
gk20a_clk_tidy(struct nvkm_clk *base)
{
}

/* nvkm_clk .fini: power the PLL down on suspend/teardown. */
static void
gk20a_clk_fini(struct nvkm_clk *base)
{
	struct gk20a_clk *clk = gk20a_clk(base);
	gk20a_pllg_disable(clk);
}

/*
 * nvkm_clk .init: reset the output divider block to its defaults and
 * program the PLL with the current cached coefficients.
 */
static int
gk20a_clk_init(struct nvkm_clk *base)
{
	struct gk20a_clk *clk = gk20a_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	int ret;

	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);

	ret = gk20a_clk_prog(&clk->base);
	if (ret) {
		nvkm_error(subdev, "cannot initialize clock\n");
		return ret;
	}

	return 0;
}

static const struct nvkm_clk_func
gk20a_clk = {
	.init = gk20a_clk_init,
	.fini = gk20a_clk_fini,
	.read = gk20a_clk_read,
	.calc = gk20a_clk_calc,
	.prog = gk20a_clk_prog,
	.tidy = gk20a_clk_tidy,
	.pstates = gk20a_pstates,
	.nr_pstates = ARRAY_SIZE(gk20a_pstates),
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
		{ nv_clk_src_max }
	}
};

/*
 * Constructor: allocate the gk20a_clk, finish filling in the (static)
 * pstate table, fetch the reference clock rate from the Tegra clock
 * framework, and register with the nvkm clk subdev core.
 *
 * NOTE(review): this mutates the file-scope gk20a_pstates array, which is
 * only safe as long as a single GK20A instance exists per system.
 */
int
gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
	struct nvkm_device_tegra *tdev = device->func->tegra(device);
	struct gk20a_clk *clk;
	int ret, i;

	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
		return -ENOMEM;
	*pclk = &clk->base;

	/* Finish initializing the pstates */
	for (i = 0; i < ARRAY_SIZE(gk20a_pstates); i++) {
		INIT_LIST_HEAD(&gk20a_pstates[i].list);
		gk20a_pstates[i].pstate = i + 1;
	}

	clk->params = &gk20a_pllg_params;
	clk->parent_rate = clk_get_rate(tdev->clk);

	ret = nvkm_clk_ctor(&gk20a_clk, device, index, true, &clk->base);
	/* NOTE(review): parent_rate is u32 — %u would be the exact specifier */
	nvkm_info(&clk->base.subdev, "parent clock rate: %d Mhz\n",
		  clk->parent_rate / MHZ);
	return ret;
}