/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#define gf100_clk(p) container_of((p), struct gf100_clk, base)
#include "priv.h"
#include "pll.h"

#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/timer.h>

struct gf100_clk_info {
	u32 freq;
	u32 ssel;
	u32 mdiv;
	u32 dsrc;
	u32 ddiv;
	u32 coef;
};

struct gf100_clk {
	struct nvkm_clk base;
	struct gf100_clk_info eng[16];
};

static u32 read_div(struct gf100_clk *, int, u32, u32);

static u32
read_vco(struct gf100_clk *clk, u32 dsrc)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ssrc = nvkm_rd32(device, dsrc);
	if (!(ssrc & 0x00000100))
		return nvkm_clk_read(&clk->base, nv_clk_src_sppll0);
	return nvkm_clk_read(&clk->base, nv_clk_src_sppll1);
}

static u32
read_pll(struct gf100_clk *clk, u32 pll)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ctrl = nvkm_rd32(device, pll + 0x00);
	u32 coef = nvkm_rd32(device, pll + 0x04);
	u32 P = (coef & 0x003f0000) >> 16;
	u32 N = (coef & 0x0000ff00) >> 8;
	u32 M = (coef & 0x000000ff) >> 0;
	u32 sclk;

	if (!(ctrl & 0x00000001))
		return 0;

	switch (pll) {
	case 0x00e800:
	case 0x00e820:
		sclk = device->crystal;
		P = 1;
		break;
	case 0x132000:
		sclk = nvkm_clk_read(&clk->base, nv_clk_src_mpllsrc);
		break;
	case 0x132020:
		sclk = nvkm_clk_read(&clk->base, nv_clk_src_mpllsrcref);
		break;
	case 0x137000:
	case 0x137020:
	case 0x137040:
	case 0x1370e0:
		sclk = read_div(clk, (pll & 0xff) / 0x20, 0x137120, 0x137140);
		break;
	default:
		return 0;
	}

	return sclk * N / M / P;
}

static u32
read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
	u32 sclk, sctl, sdiv = 2;

	switch (ssrc & 0x00000003) {
	case 0:
		if ((ssrc & 0x00030000) != 0x00030000)
			return device->crystal;
		return 108000;
	case 2:
		return 100000;
	case 3:
		sclk = read_vco(clk, dsrc + (doff * 4));

		/* Memclk has doff of 0 despite its alt. location */
		if (doff <= 2) {
			sctl = nvkm_rd32(device, dctl + (doff * 4));

			if (sctl & 0x80000000) {
				if (ssrc & 0x100)
					sctl >>= 8;

				sdiv = (sctl & 0x3f) + 2;
			}
		}

		return (sclk * 2) / sdiv;
	default:
		return 0;
	}
}
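
/*
 * Worked example (illustrative figures, not from the original source):
 * with a 1620000 kHz VCO and a divider control word of 0x80000005,
 * bit 31 marks the divider as active and the low six bits hold 5, so
 * read_div() returns (1620000 * 2) / (5 + 2) = 462857 kHz.  The +2
 * bias applied to a doubled source clock yields effective dividers of
 * 1.0 through 32.5 in 0.5 steps.
 */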

static u32
read_clk(struct gf100_clk *clk, int idx)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
	u32 ssel = nvkm_rd32(device, 0x137100);
	u32 sclk, sdiv;

	if (ssel & (1 << idx)) {
		if (idx < 7)
			sclk = read_pll(clk, 0x137000 + (idx * 0x20));
		else
			sclk = read_pll(clk, 0x1370e0);
		sdiv = ((sctl & 0x00003f00) >> 8) + 2;
	} else {
		sclk = read_div(clk, idx, 0x137160, 0x1371d0);
		sdiv = ((sctl & 0x0000003f) >> 0) + 2;
	}

	if (sctl & 0x80000000)
		return (sclk * 2) / sdiv;

	return sclk;
}

static int
gf100_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct gf100_clk *clk = gf100_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_href:
		return 100000;
	case nv_clk_src_sppll0:
		return read_pll(clk, 0x00e800);
	case nv_clk_src_sppll1:
		return read_pll(clk, 0x00e820);

	case nv_clk_src_mpllsrcref:
		return read_div(clk, 0, 0x137320, 0x137330);
	case nv_clk_src_mpllsrc:
		return read_pll(clk, 0x132020);
	case nv_clk_src_mpll:
		return read_pll(clk, 0x132000);
	case nv_clk_src_mdiv:
		return read_div(clk, 0, 0x137300, 0x137310);
	case nv_clk_src_mem:
		if (nvkm_rd32(device, 0x1373f0) & 0x00000002)
			return nvkm_clk_read(&clk->base, nv_clk_src_mpll);
		return nvkm_clk_read(&clk->base, nv_clk_src_mdiv);

	case nv_clk_src_gpc:
		return read_clk(clk, 0x00);
	case nv_clk_src_rop:
		return read_clk(clk, 0x01);
	case nv_clk_src_hubk07:
		return read_clk(clk, 0x02);
	case nv_clk_src_hubk06:
		return read_clk(clk, 0x07);
	case nv_clk_src_hubk01:
		return read_clk(clk, 0x08);
	case nv_clk_src_copy:
		return read_clk(clk, 0x09);
	case nv_clk_src_pmu:
		return read_clk(clk, 0x0c);
	case nv_clk_src_vdec:
		return read_clk(clk, 0x0e);
	default:
		nvkm_error(subdev, "invalid clock source %d\n", src);
		return -EINVAL;
	}
}

static u32
calc_div(struct gf100_clk *clk, int idx, u32 ref, u32 freq, u32 *ddiv)
{
	u32 div = min((ref * 2) / freq, (u32)65);
	if (div < 2)
		div = 2;

	*ddiv = div - 2;
	return (ref * 2) / div;
}

static u32
calc_src(struct gf100_clk *clk, int idx, u32 freq, u32 *dsrc, u32 *ddiv)
{
	u32 sclk;

	/* use one of the fixed frequencies if possible */
	*ddiv = 0x00000000;
	switch (freq) {
	case  27000:
	case 108000:
		*dsrc = 0x00000000;
		if (freq == 108000)
			*dsrc |= 0x00030000;
		return freq;
	case 100000:
		*dsrc = 0x00000002;
		return freq;
	default:
		*dsrc = 0x00000003;
		break;
	}

	/* otherwise, calculate the closest divider */
	sclk = read_vco(clk, 0x137160 + (idx * 4));
	if (idx < 7)
		sclk = calc_div(clk, idx, sclk, freq, ddiv);
	return sclk;
}
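
/*
 * calc_pll() below packs P, N and M into the same layout read_pll()
 * decodes: P in bits 21:16, N in bits 15:8, M in bits 7:0.  As an
 * illustration (hypothetical coefficients), N = 162, M = 2, P = 2 on
 * a 27000 kHz reference gives 27000 * 162 / 2 / 2 = 1093500 kHz and
 * a coef word of (2 << 16) | (162 << 8) | 2 = 0x0002a202.
 */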

static u32
calc_pll(struct gf100_clk *clk, int idx, u32 freq, u32 *coef)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct nvbios_pll limits;
	int N, M, P, ret;

	ret = nvbios_pll_parse(bios, 0x137000 + (idx * 0x20), &limits);
	if (ret)
		return 0;

	limits.refclk = read_div(clk, idx, 0x137120, 0x137140);
	if (!limits.refclk)
		return 0;

	ret = gt215_pll_calc(subdev, &limits, freq, &N, NULL, &M, &P);
	if (ret <= 0)
		return 0;

	*coef = (P << 16) | (N << 8) | M;
	return ret;
}

static int
calc_clk(struct gf100_clk *clk, struct nvkm_cstate *cstate, int idx, int dom)
{
	struct gf100_clk_info *info = &clk->eng[idx];
	u32 freq = cstate->domain[dom];
	u32 src0, div0, div1D, div1P = 0;
	u32 clk0, clk1 = 0;

	/* invalid clock domain */
	if (!freq)
		return 0;

	/* first possible path, using only dividers */
	clk0 = calc_src(clk, idx, freq, &src0, &div0);
	clk0 = calc_div(clk, idx, clk0, freq, &div1D);

	/* see if we can get any closer using PLLs */
	if (clk0 != freq && (0x00004387 & (1 << idx))) {
		if (idx <= 7)
			clk1 = calc_pll(clk, idx, freq, &info->coef);
		else
			clk1 = cstate->domain[nv_clk_src_hubk06];
		clk1 = calc_div(clk, idx, clk1, freq, &div1P);
	}

	/* select the method which gets closest to target freq */
	if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
		info->dsrc = src0;
		if (div0) {
			info->ddiv |= 0x80000000;
			info->ddiv |= div0 << 8;
			info->ddiv |= div0;
		}
		if (div1D) {
			info->mdiv |= 0x80000000;
			info->mdiv |= div1D;
		}
		info->ssel = info->coef = 0;
		info->freq = clk0;
	} else {
		if (div1P) {
			info->mdiv |= 0x80000000;
			info->mdiv |= div1P << 8;
		}
		info->ssel = (1 << idx);
		info->freq = clk1;
	}

	return 0;
}

static int
gf100_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
	struct gf100_clk *clk = gf100_clk(base);
	int ret;

	if ((ret = calc_clk(clk, cstate, 0x00, nv_clk_src_gpc)) ||
	    (ret = calc_clk(clk, cstate, 0x01, nv_clk_src_rop)) ||
	    (ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) ||
	    (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) ||
	    (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) ||
	    (ret = calc_clk(clk, cstate, 0x09, nv_clk_src_copy)) ||
	    (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_pmu)) ||
	    (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec)))
		return ret;

	return 0;
}
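
/*
 * The five prog stages below run in order, each applied to every
 * active domain before the next stage starts: pre-program the linear
 * dividers, switch each domain to divider mode, (re)program and lock
 * the PLL where one is used, switch back to PLL mode where selected,
 * then set the final divider.  Presumably this ordering exists so no
 * domain is ever sourced from a PLL while that PLL is reprogrammed.
 */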

static void
gf100_clk_prog_0(struct gf100_clk *clk, int idx)
{
	struct gf100_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	if (idx < 7 && !info->ssel) {
		nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x80003f3f, info->ddiv);
		nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
	}
}

static void
gf100_clk_prog_1(struct gf100_clk *clk, int idx)
{
	struct nvkm_device *device = clk->base.subdev.device;
	nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x137100) & (1 << idx)))
			break;
	);
}

static void
gf100_clk_prog_2(struct gf100_clk *clk, int idx)
{
	struct gf100_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	const u32 addr = 0x137000 + (idx * 0x20);
	if (idx <= 7) {
		nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
		nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
		if (info->coef) {
			nvkm_wr32(device, addr + 0x04, info->coef);
			nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);

			/* Test PLL lock */
			nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000);
			nvkm_msec(device, 2000,
				if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
					break;
			);
			nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010);

			/* Enable sync mode */
			nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004);
		}
	}
}

static void
gf100_clk_prog_3(struct gf100_clk *clk, int idx)
{
	struct gf100_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	if (info->ssel) {
		nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
		nvkm_msec(device, 2000,
			u32 tmp = nvkm_rd32(device, 0x137100) & (1 << idx);
			if (tmp == info->ssel)
				break;
		);
	}
}

static void
gf100_clk_prog_4(struct gf100_clk *clk, int idx)
{
	struct gf100_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f3f, info->mdiv);
}

static int
gf100_clk_prog(struct nvkm_clk *base)
{
	struct gf100_clk *clk = gf100_clk(base);
	struct {
		void (*exec)(struct gf100_clk *, int);
	} stage[] = {
		{ gf100_clk_prog_0 }, /* div programming */
		{ gf100_clk_prog_1 }, /* select div mode */
		{ gf100_clk_prog_2 }, /* (maybe) program pll */
		{ gf100_clk_prog_3 }, /* (maybe) select pll mode */
		{ gf100_clk_prog_4 }, /* final divider */
	};
	int i, j;

	for (i = 0; i < ARRAY_SIZE(stage); i++) {
		for (j = 0; j < ARRAY_SIZE(clk->eng); j++) {
			if (!clk->eng[j].freq)
				continue;
			stage[i].exec(clk, j);
		}
	}

	return 0;
}

static void
gf100_clk_tidy(struct nvkm_clk *base)
{
	struct gf100_clk *clk = gf100_clk(base);
	memset(clk->eng, 0x00, sizeof(clk->eng));
}

static const struct nvkm_clk_func
gf100_clk = {
	.read = gf100_clk_read,
	.calc = gf100_clk_calc,
	.prog = gf100_clk_prog,
	.tidy = gf100_clk_tidy,
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_href   , 0xff },
		{ nv_clk_src_hubk06 , 0x00 },
		{ nv_clk_src_hubk01 , 0x01 },
		{ nv_clk_src_copy   , 0x02 },
		{ nv_clk_src_gpc    , 0x03, NVKM_CLK_DOM_FLAG_VPSTATE, "core", 2000 },
		{ nv_clk_src_rop    , 0x04 },
		{ nv_clk_src_mem    , 0x05, 0, "memory", 1000 },
		{ nv_clk_src_vdec   , 0x06 },
		{ nv_clk_src_pmu    , 0x0a },
		{ nv_clk_src_hubk07 , 0x0b },
		{ nv_clk_src_max }
	}
};

int
gf100_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_clk **pclk)
{
	struct gf100_clk *clk;

	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
		return -ENOMEM;
	*pclk = &clk->base;

	return nvkm_clk_ctor(&gf100_clk, device, type, inst, false, &clk->base);
}