/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#define gk104_ram(p) container_of((p), struct gk104_ram, base)
#include "ram.h"
#include "ramfuc.h"

#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/init.h>
#include <subdev/bios/M0205.h>
#include <subdev/bios/M0209.h>
#include <subdev/bios/pll.h>
#include <subdev/bios/rammap.h>
#include <subdev/bios/timing.h>
#include <subdev/clk.h>
#include <subdev/clk/pll.h>
#include <subdev/gpio.h>

struct gk104_ramfuc {
	struct ramfuc base;

	struct nvbios_pll refpll;
	struct nvbios_pll mempll;

	struct ramfuc_reg r_gpioMV;
	u32 r_funcMV[2];
	struct ramfuc_reg r_gpio2E;
	u32 r_func2E[2];
	struct ramfuc_reg r_gpiotrig;

	struct ramfuc_reg r_0x132020;
	struct ramfuc_reg r_0x132028;
	struct ramfuc_reg r_0x132024;
	struct ramfuc_reg r_0x132030;
	struct ramfuc_reg r_0x132034;
	struct ramfuc_reg r_0x132000;
	struct ramfuc_reg r_0x132004;
	struct ramfuc_reg r_0x132040;

	struct ramfuc_reg r_0x10f248;
	struct ramfuc_reg r_0x10f290;
	struct ramfuc_reg r_0x10f294;
	struct ramfuc_reg r_0x10f298;
	struct ramfuc_reg r_0x10f29c;
	struct ramfuc_reg r_0x10f2a0;
	struct ramfuc_reg r_0x10f2a4;
	struct ramfuc_reg r_0x10f2a8;
	struct ramfuc_reg r_0x10f2ac;
	struct ramfuc_reg r_0x10f2cc;
	struct ramfuc_reg r_0x10f2e8;
	struct ramfuc_reg r_0x10f250;
	struct ramfuc_reg r_0x10f24c;
	struct ramfuc_reg r_0x10fec4;
	struct ramfuc_reg r_0x10fec8;
	struct ramfuc_reg r_0x10f604;
	struct ramfuc_reg r_0x10f614;
	struct ramfuc_reg r_0x10f610;
	struct ramfuc_reg r_0x100770;
	struct ramfuc_reg r_0x100778;
	struct ramfuc_reg r_0x10f224;

	struct ramfuc_reg r_0x10f870;
	struct ramfuc_reg r_0x10f698;
	struct ramfuc_reg r_0x10f694;
	struct ramfuc_reg r_0x10f6b8;
	struct ramfuc_reg r_0x10f808;
	struct ramfuc_reg r_0x10f670;
	struct ramfuc_reg r_0x10f60c;
	struct ramfuc_reg r_0x10f830;
	struct ramfuc_reg r_0x1373ec;
	struct ramfuc_reg r_0x10f800;
	struct ramfuc_reg r_0x10f82c;

	struct ramfuc_reg r_0x10f978;
	struct ramfuc_reg r_0x10f910;
	struct ramfuc_reg r_0x10f914;

	struct ramfuc_reg r_mr[16]; /* MR0 - MR8, MR15 */

	struct ramfuc_reg r_0x62c000;

	struct ramfuc_reg r_0x10f200;

	struct ramfuc_reg r_0x10f210;
	struct ramfuc_reg r_0x10f310;
	struct ramfuc_reg r_0x10f314;
	struct ramfuc_reg r_0x10f318;
	struct ramfuc_reg r_0x10f090;
	struct ramfuc_reg r_0x10f69c;
	struct ramfuc_reg r_0x10f824;
	struct ramfuc_reg r_0x1373f0;
	struct ramfuc_reg r_0x1373f4;
	struct ramfuc_reg r_0x137320;
	struct ramfuc_reg r_0x10f65c;
	struct ramfuc_reg r_0x10f6bc;
	struct ramfuc_reg r_0x100710;
	struct ramfuc_reg r_0x100750;
};

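/* "parts"/"pmask" below come straight from 0x022438/0x022554 (see
 * gk104_ram_new()); presumably the number of memory partitions and a mask of
 * partitions to skip.  "pnuts" marks partitions whose config differs from
 * the rest and which therefore get the extra ram_nuts() writes during
 * reclocking.
 */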
struct gk104_ram {
	struct nvkm_ram base;
	struct gk104_ramfuc fuc;

	struct list_head cfg;
	u32 parts;
	u32 pmask;
	u32 pnuts;

	struct nvbios_ramcfg diff;
	int from;
	int mode;
	int N1, fN1, M1, P1;
	int N2, M2, P2;
};

/*******************************************************************************
 * GDDR5
 ******************************************************************************/
static void
gk104_ram_train(struct gk104_ramfuc *fuc, u32 mask, u32 data)
{
	struct gk104_ram *ram = container_of(fuc, typeof(*ram), fuc);
	u32 addr = 0x110974, i;

	ram_mask(fuc, 0x10f910, mask, data);
	ram_mask(fuc, 0x10f914, mask, data);

	for (i = 0; (data & 0x80000000) && i < ram->parts; addr += 0x1000, i++) {
		if (ram->pmask & (1 << i))
			continue;
		ram_wait(fuc, addr, 0x0000000f, 0x00000000, 500000);
	}
}

static void
r1373f4_init(struct gk104_ramfuc *fuc)
{
	struct gk104_ram *ram = container_of(fuc, typeof(*ram), fuc);
	const u32 mcoef = ((--ram->P2 << 28) | (ram->N2 << 8) | ram->M2);
	const u32 rcoef = (( ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
	const u32 runk0 = ram->fN1 << 16;
	const u32 runk1 = ram->fN1;

	if (ram->from == 2) {
		ram_mask(fuc, 0x1373f4, 0x00000000, 0x00001100);
		ram_mask(fuc, 0x1373f4, 0x00000000, 0x00000010);
	} else {
		ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010010);
	}

	ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000000);
	ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000000);

	/* (re)program refpll, if required */
	if ((ram_rd32(fuc, 0x132024) & 0xffffffff) != rcoef ||
	    (ram_rd32(fuc, 0x132034) & 0x0000ffff) != runk1) {
		ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
		ram_mask(fuc, 0x132020, 0x00000001, 0x00000000);
		ram_wr32(fuc, 0x137320, 0x00000000);
		ram_mask(fuc, 0x132030, 0xffff0000, runk0);
		ram_mask(fuc, 0x132034, 0x0000ffff, runk1);
		ram_wr32(fuc, 0x132024, rcoef);
		ram_mask(fuc, 0x132028, 0x00080000, 0x00080000);
		ram_mask(fuc, 0x132020, 0x00000001, 0x00000001);
		ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
		ram_mask(fuc, 0x132028, 0x00080000, 0x00000000);
	}

	/* (re)program mempll, if required */
	if (ram->mode == 2) {
		ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
		ram_mask(fuc, 0x132000, 0x80000000, 0x80000000);
		ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
		ram_mask(fuc, 0x132004, 0x103fffff, mcoef);
		ram_mask(fuc, 0x132000, 0x00000001, 0x00000001);
		ram_wait(fuc, 0x137390, 0x00000002, 0x00000002, 64000);
		ram_mask(fuc, 0x1373f4, 0x00000000, 0x00001100);
	} else {
		ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010100);
	}

	ram_mask(fuc, 0x1373f4, 0x00000000, 0x00000010);
}

static void
r1373f4_fini(struct gk104_ramfuc *fuc)
{
	struct gk104_ram *ram = container_of(fuc, typeof(*ram), fuc);
	struct nvkm_ram_data *next = ram->base.next;
	u8 v0 = next->bios.ramcfg_11_03_c0;
	u8 v1 = next->bios.ramcfg_11_03_30;
	u32 tmp;

	tmp = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
	ram_wr32(fuc, 0x1373ec, tmp | (v1 << 16));
	ram_mask(fuc, 0x1373f0, (~ram->mode & 3), 0x00000000);
	if (ram->mode == 2) {
		ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000002);
		ram_mask(fuc, 0x1373f4, 0x00001100, 0x00000000);
	} else {
		ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000001);
		ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
	}
	ram_mask(fuc, 0x10f800, 0x00000030, (v0 ^ v1) << 4);
}

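/* Mirror a masked update of a 0x10fxxx register into the per-partition
 * register space (0x110000 + (addr & 0xfff) + i * 0x1000) for every
 * partition flagged in pnuts.  Bits in _mask come from _data, bits in _copy
 * are taken from the value last written via the normal ramfuc path
 * (reg->data).
 */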
static void
gk104_ram_nuts(struct gk104_ram *ram, struct ramfuc_reg *reg,
	       u32 _mask, u32 _data, u32 _copy)
{
	struct nvkm_fb *fb = ram->base.fb;
	struct ramfuc *fuc = &ram->fuc.base;
	struct nvkm_device *device = fb->subdev.device;
	u32 addr = 0x110000 + (reg->addr & 0xfff);
	u32 mask = _mask | _copy;
	u32 data = (_data & _mask) | (reg->data & _copy);
	u32 i;

	for (i = 0; i < 16; i++, addr += 0x1000) {
		if (ram->pnuts & (1 << i)) {
			u32 prev = nvkm_rd32(device, addr);
			u32 next = (prev & ~mask) | data;
			nvkm_memx_wr32(fuc->memx, addr, next);
		}
	}
}
#define ram_nuts(s,r,m,d,c) \
	gk104_ram_nuts((s), &(s)->fuc.r_##r, (m), (d), (c))

static int
gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
{
	struct gk104_ramfuc *fuc = &ram->fuc;
	struct nvkm_ram_data *next = ram->base.next;
	int vc = !next->bios.ramcfg_11_02_08;
	int mv = !next->bios.ramcfg_11_02_04;
	u32 mask, data;

	ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
	ram_block(fuc);
	ram_wr32(fuc, 0x62c000, 0x0f0f0000);

	/* MR1: turn termination on early, for some reason.. */
	if ((ram->base.mr[1] & 0x03c) != 0x030) {
		ram_mask(fuc, mr[1], 0x03c, ram->base.mr[1] & 0x03c);
		ram_nuts(ram, mr[1], 0x03c, ram->base.mr1_nuts & 0x03c, 0x000);
	}

	if (vc == 1 && ram_have(fuc, gpio2E)) {
		u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]);
		if (temp != ram_rd32(fuc, gpio2E)) {
			ram_wr32(fuc, gpiotrig, 1);
			ram_nsec(fuc, 20000);
		}
	}

	ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);

	gk104_ram_train(fuc, 0x01020000, 0x000c0000);

	ram_wr32(fuc, 0x10f210, 0x00000000); /* REFRESH_AUTO = 0 */
	ram_nsec(fuc, 1000);
	ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
	ram_nsec(fuc, 1000);

	ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
	ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
	ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
	ram_wr32(fuc, 0x10f090, 0x00000061);
	ram_wr32(fuc, 0x10f090, 0xc000007f);
	ram_nsec(fuc, 1000);

	ram_wr32(fuc, 0x10f698, 0x00000000);
	ram_wr32(fuc, 0x10f69c, 0x00000000);

	/*XXX: there does appear to be some kind of condition here, simply
	 *     modifying these bits in the vbios from the default pl0
	 *     entries shows no change.  however, the data does appear to
	 *     be correct and may be required for the transition back
	 */
	mask = 0x800f07e0;
	data = 0x00030000;
	if (ram_rd32(fuc, 0x10f978) & 0x00800000)
		data |= 0x00040000;

	if (1) {
		data |= 0x800807e0;
		switch (next->bios.ramcfg_11_03_c0) {
		case 3: data &= ~0x00000040; break;
		case 2: data &= ~0x00000100; break;
		case 1: data &= ~0x80000000; break;
		case 0: data &= ~0x00000400; break;
		}

		switch (next->bios.ramcfg_11_03_30) {
		case 3: data &= ~0x00000020; break;
		case 2: data &= ~0x00000080; break;
		case 1: data &= ~0x00080000; break;
		case 0: data &= ~0x00000200; break;
		}
	}

	if (next->bios.ramcfg_11_02_80)
		mask |= 0x03000000;
	if (next->bios.ramcfg_11_02_40)
		mask |= 0x00002000;
	if (next->bios.ramcfg_11_07_10)
		mask |= 0x00004000;
	if (next->bios.ramcfg_11_07_08)
		mask |= 0x00000003;
	else {
		mask |= 0x34000000;
		if (ram_rd32(fuc, 0x10f978) & 0x00800000)
			mask |= 0x40000000;
	}
	ram_mask(fuc, 0x10f824, mask, data);

	ram_mask(fuc, 0x132040, 0x00010000, 0x00000000);

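	/* each combination of current (from) and target (mode) clock source
	 * gets a slightly different bracket around r1373f4_init()/_fini()
	 * below; mode/from == 2 appears to be the mempll path, anything else
	 * the refpll/divider path.
	 */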
	if (ram->from == 2 && ram->mode != 2) {
		ram_mask(fuc, 0x10f808, 0x00080000, 0x00000000);
		ram_mask(fuc, 0x10f200, 0x18008000, 0x00008000);
		ram_mask(fuc, 0x10f800, 0x00000000, 0x00000004);
		ram_mask(fuc, 0x10f830, 0x00008000, 0x01040010);
		ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
		r1373f4_init(fuc);
		ram_mask(fuc, 0x1373f0, 0x00000002, 0x00000001);
		r1373f4_fini(fuc);
		ram_mask(fuc, 0x10f830, 0x00c00000, 0x00240001);
	} else
	if (ram->from != 2 && ram->mode != 2) {
		r1373f4_init(fuc);
		r1373f4_fini(fuc);
	}

	if (ram_have(fuc, gpioMV)) {
		u32 temp = ram_mask(fuc, gpioMV, 0x3000, fuc->r_funcMV[mv]);
		if (temp != ram_rd32(fuc, gpioMV)) {
			ram_wr32(fuc, gpiotrig, 1);
			ram_nsec(fuc, 64000);
		}
	}

	if (next->bios.ramcfg_11_02_40 ||
	    next->bios.ramcfg_11_07_10) {
		ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
		ram_nsec(fuc, 20000);
	}

	if (ram->from != 2 && ram->mode == 2) {
		if (0 /*XXX: Titan */)
			ram_mask(fuc, 0x10f200, 0x18000000, 0x18000000);
		ram_mask(fuc, 0x10f800, 0x00000004, 0x00000000);
		ram_mask(fuc, 0x1373f0, 0x00000000, 0x00000002);
		ram_mask(fuc, 0x10f830, 0x00800001, 0x00408010);
		r1373f4_init(fuc);
		r1373f4_fini(fuc);
		ram_mask(fuc, 0x10f808, 0x00000000, 0x00080000);
		ram_mask(fuc, 0x10f200, 0x00808000, 0x00800000);
	} else
	if (ram->from == 2 && ram->mode == 2) {
		ram_mask(fuc, 0x10f800, 0x00000004, 0x00000000);
		r1373f4_init(fuc);
		r1373f4_fini(fuc);
	}

	if (ram->mode != 2) /*XXX*/ {
		if (next->bios.ramcfg_11_07_40)
			ram_mask(fuc, 0x10f670, 0x80000000, 0x80000000);
	}

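	/* 0x00000011 * rammap_11_11_0c duplicates the 4-bit value into both
	 * nibbles of the low byte; 0x01010101 * ramcfg_11_09 broadcasts the
	 * byte into all four byte lanes of the register.
	 */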
	ram_wr32(fuc, 0x10f65c, 0x00000011 * next->bios.rammap_11_11_0c);
	ram_wr32(fuc, 0x10f6b8, 0x01010101 * next->bios.ramcfg_11_09);
	ram_wr32(fuc, 0x10f6bc, 0x01010101 * next->bios.ramcfg_11_09);

	if (!next->bios.ramcfg_11_07_08 && !next->bios.ramcfg_11_07_04) {
		ram_wr32(fuc, 0x10f698, 0x01010101 * next->bios.ramcfg_11_04);
		ram_wr32(fuc, 0x10f69c, 0x01010101 * next->bios.ramcfg_11_04);
	} else
	if (!next->bios.ramcfg_11_07_08) {
		ram_wr32(fuc, 0x10f698, 0x00000000);
		ram_wr32(fuc, 0x10f69c, 0x00000000);
	}

	if (ram->mode != 2) {
		u32 data = 0x01000100 * next->bios.ramcfg_11_04;
		ram_nuke(fuc, 0x10f694);
		ram_mask(fuc, 0x10f694, 0xff00ff00, data);
	}

	if (ram->mode == 2 && next->bios.ramcfg_11_08_10)
		data = 0x00000080;
	else
		data = 0x00000000;
	ram_mask(fuc, 0x10f60c, 0x00000080, data);

	mask = 0x00070000;
	data = 0x00000000;
	if (!next->bios.ramcfg_11_02_80)
		data |= 0x03000000;
	if (!next->bios.ramcfg_11_02_40)
		data |= 0x00002000;
	if (!next->bios.ramcfg_11_07_10)
		data |= 0x00004000;
	if (!next->bios.ramcfg_11_07_08)
		data |= 0x00000003;
	else
		data |= 0x74000000;
	ram_mask(fuc, 0x10f824, mask, data);

	if (next->bios.ramcfg_11_01_08)
		data = 0x00000000;
	else
		data = 0x00001000;
	ram_mask(fuc, 0x10f200, 0x00001000, data);

	if (ram_rd32(fuc, 0x10f670) & 0x80000000) {
		ram_nsec(fuc, 10000);
		ram_mask(fuc, 0x10f670, 0x80000000, 0x00000000);
	}

	if (next->bios.ramcfg_11_08_01)
		data = 0x00100000;
	else
		data = 0x00000000;
	ram_mask(fuc, 0x10f82c, 0x00100000, data);

	data = 0x00000000;
	if (next->bios.ramcfg_11_08_08)
		data |= 0x00002000;
	if (next->bios.ramcfg_11_08_04)
		data |= 0x00001000;
	if (next->bios.ramcfg_11_08_02)
		data |= 0x00004000;
	ram_mask(fuc, 0x10f830, 0x00007000, data);

	/* PFB timing */
	ram_mask(fuc, 0x10f248, 0xffffffff, next->bios.timing[10]);
	ram_mask(fuc, 0x10f290, 0xffffffff, next->bios.timing[0]);
	ram_mask(fuc, 0x10f294, 0xffffffff, next->bios.timing[1]);
	ram_mask(fuc, 0x10f298, 0xffffffff, next->bios.timing[2]);
	ram_mask(fuc, 0x10f29c, 0xffffffff, next->bios.timing[3]);
	ram_mask(fuc, 0x10f2a0, 0xffffffff, next->bios.timing[4]);
	ram_mask(fuc, 0x10f2a4, 0xffffffff, next->bios.timing[5]);
	ram_mask(fuc, 0x10f2a8, 0xffffffff, next->bios.timing[6]);
	ram_mask(fuc, 0x10f2ac, 0xffffffff, next->bios.timing[7]);
	ram_mask(fuc, 0x10f2cc, 0xffffffff, next->bios.timing[8]);
	ram_mask(fuc, 0x10f2e8, 0xffffffff, next->bios.timing[9]);

	data = mask = 0x00000000;
	if (ram->diff.ramcfg_11_08_20) {
		if (next->bios.ramcfg_11_08_20)
			data |= 0x01000000;
		mask |= 0x01000000;
	}
	ram_mask(fuc, 0x10f200, mask, data);

	data = mask = 0x00000000;
	if (ram->diff.ramcfg_11_02_03) {
		data |= next->bios.ramcfg_11_02_03 << 8;
		mask |= 0x00000300;
	}
	if (ram->diff.ramcfg_11_01_10) {
		if (next->bios.ramcfg_11_01_10)
			data |= 0x70000000;
		mask |= 0x70000000;
	}
	ram_mask(fuc, 0x10f604, mask, data);

	data = mask = 0x00000000;
	if (ram->diff.timing_20_30_07) {
		data |= next->bios.timing_20_30_07 << 28;
		mask |= 0x70000000;
	}
	if (ram->diff.ramcfg_11_01_01) {
		if (next->bios.ramcfg_11_01_01)
			data |= 0x00000100;
		mask |= 0x00000100;
	}
	ram_mask(fuc, 0x10f614, mask, data);

	data = mask = 0x00000000;
	if (ram->diff.timing_20_30_07) {
		data |= next->bios.timing_20_30_07 << 28;
		mask |= 0x70000000;
	}
	if (ram->diff.ramcfg_11_01_02) {
		if (next->bios.ramcfg_11_01_02)
			data |= 0x00000100;
		mask |= 0x00000100;
	}
	ram_mask(fuc, 0x10f610, mask, data);

	mask = 0x33f00000;
	data = 0x00000000;
	if (!next->bios.ramcfg_11_01_04)
		data |= 0x20200000;
	if (!next->bios.ramcfg_11_07_80)
		data |= 0x12800000;
	/*XXX: see note above about there probably being some condition
	 *     for the 10f824 stuff that uses ramcfg 3...
	 */
	if (next->bios.ramcfg_11_03_f0) {
		if (next->bios.rammap_11_08_0c) {
			if (!next->bios.ramcfg_11_07_80)
				mask |= 0x00000020;
			else
				data |= 0x00000020;
			mask |= 0x00000004;
		}
	} else {
		mask |= 0x40000020;
		data |= 0x00000004;
	}

	ram_mask(fuc, 0x10f808, mask, data);

	ram_wr32(fuc, 0x10f870, 0x11111111 * next->bios.ramcfg_11_03_0f);

	data = mask = 0x00000000;
	if (ram->diff.ramcfg_11_02_03) {
		data |= next->bios.ramcfg_11_02_03;
		mask |= 0x00000003;
	}
	if (ram->diff.ramcfg_11_01_10) {
		if (next->bios.ramcfg_11_01_10)
			data |= 0x00000004;
		mask |= 0x00000004;
	}

	if ((ram_mask(fuc, 0x100770, mask, data) & mask & 4) != (data & 4)) {
		ram_mask(fuc, 0x100750, 0x00000008, 0x00000008);
		ram_wr32(fuc, 0x100710, 0x00000000);
		ram_wait(fuc, 0x100710, 0x80000000, 0x80000000, 200000);
	}

	data = next->bios.timing_20_30_07 << 8;
	if (next->bios.ramcfg_11_01_01)
		data |= 0x80000000;
	ram_mask(fuc, 0x100778, 0x00000700, data);

	ram_mask(fuc, 0x10f250, 0x000003f0, next->bios.timing_20_2c_003f << 4);
	data = (next->bios.timing[10] & 0x7f000000) >> 24;
	if (data < next->bios.timing_20_2c_1fc0)
		data = next->bios.timing_20_2c_1fc0;
	ram_mask(fuc, 0x10f24c, 0x7f000000, data << 24);
	ram_mask(fuc, 0x10f224, 0x001f0000, next->bios.timing_20_30_f8 << 16);

	ram_mask(fuc, 0x10fec4, 0x041e0f07, next->bios.timing_20_31_0800 << 26 |
					    next->bios.timing_20_31_0780 << 17 |
					    next->bios.timing_20_31_0078 << 8 |
					    next->bios.timing_20_31_0007);
	ram_mask(fuc, 0x10fec8, 0x00000027, next->bios.timing_20_31_8000 << 5 |
					    next->bios.timing_20_31_7000);

	ram_wr32(fuc, 0x10f090, 0x4000007e);
	ram_nsec(fuc, 2000);
	ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
	ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
	ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */

	if (next->bios.ramcfg_11_08_10 && (ram->mode == 2) /*XXX*/) {
		u32 temp = ram_mask(fuc, 0x10f294, 0xff000000, 0x24000000);
		gk104_ram_train(fuc, 0xbc0e0000, 0xa4010000); /*XXX*/
		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f294, temp);
	}

	ram_mask(fuc, mr[3], 0xfff, ram->base.mr[3]);
	ram_wr32(fuc, mr[0], ram->base.mr[0]);
	ram_mask(fuc, mr[8], 0xfff, ram->base.mr[8]);
	ram_nsec(fuc, 1000);
	ram_mask(fuc, mr[1], 0xfff, ram->base.mr[1]);
	ram_mask(fuc, mr[5], 0xfff, ram->base.mr[5] & ~0x004); /* LP3 later */
	ram_mask(fuc, mr[6], 0xfff, ram->base.mr[6]);
	ram_mask(fuc, mr[7], 0xfff, ram->base.mr[7]);

	if (vc == 0 && ram_have(fuc, gpio2E)) {
		u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[0]);
		if (temp != ram_rd32(fuc, gpio2E)) {
			ram_wr32(fuc, gpiotrig, 1);
			ram_nsec(fuc, 20000);
		}
	}

	ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
	ram_wr32(fuc, 0x10f318, 0x00000001); /* NOP? */
	ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
	ram_nsec(fuc, 1000);
	ram_nuts(ram, 0x10f200, 0x18808800, 0x00000000, 0x18808800);

	data = ram_rd32(fuc, 0x10f978);
	data &= ~0x00046144;
	data |= 0x0000000b;
	if (!next->bios.ramcfg_11_07_08) {
		if (!next->bios.ramcfg_11_07_04)
			data |= 0x0000200c;
		else
			data |= 0x00000000;
	} else {
		data |= 0x00040044;
	}
	ram_wr32(fuc, 0x10f978, data);

	if (ram->mode == 1) {
		data = ram_rd32(fuc, 0x10f830) | 0x00000001;
		ram_wr32(fuc, 0x10f830, data);
	}

	if (!next->bios.ramcfg_11_07_08) {
		data = 0x88020000;
		if ( next->bios.ramcfg_11_07_04)
			data |= 0x10000000;
		if (!next->bios.rammap_11_08_10)
			data |= 0x00080000;
	} else {
		data = 0xa40e0000;
	}
	gk104_ram_train(fuc, 0xbc0f0000, data);
	if (1) /* XXX: not always? */
		ram_nsec(fuc, 1000);

	if (ram->mode == 2) { /*XXX*/
		ram_mask(fuc, 0x10f800, 0x00000004, 0x00000004);
	}

	/* LP3 */
	if (ram_mask(fuc, mr[5], 0x004, ram->base.mr[5]) != ram->base.mr[5])
		ram_nsec(fuc, 1000);

	if (ram->mode != 2) {
		ram_mask(fuc, 0x10f830, 0x01000000, 0x01000000);
		ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
	}

	if (next->bios.ramcfg_11_07_02)
		gk104_ram_train(fuc, 0x80020000, 0x01000000);

	ram_unblock(fuc);
	ram_wr32(fuc, 0x62c000, 0x0f0f0f00);

	if (next->bios.rammap_11_08_01)
		data = 0x00000800;
	else
		data = 0x00000000;
	ram_mask(fuc, 0x10f200, 0x00000800, data);
	ram_nuts(ram, 0x10f200, 0x18808800, data, 0x18808800);
	return 0;
}

/*******************************************************************************
 * DDR3
 ******************************************************************************/

static void
nvkm_sddr3_dll_reset(struct gk104_ramfuc *fuc)
{
	ram_nuke(fuc, mr[0]);
	ram_mask(fuc, mr[0], 0x100, 0x100);
	ram_mask(fuc, mr[0], 0x100, 0x000);
}

static void
nvkm_sddr3_dll_disable(struct gk104_ramfuc *fuc)
{
	u32 mr1_old = ram_rd32(fuc, mr[1]);

	if (!(mr1_old & 0x1)) {
		ram_mask(fuc, mr[1], 0x1, 0x1);
		ram_nsec(fuc, 1000);
	}
}

static int
gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
{
	struct gk104_ramfuc *fuc = &ram->fuc;
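	/* refpll coefficients packed to match the 0x132024 layout programmed
	 * below: P1 at bit 16, N1 in bits 15:8, M1 in bits 7:0; runk0/runk1
	 * carry fN1 for 0x132030[31:16] / 0x132034[15:0] (same encoding as
	 * r1373f4_init()).
	 */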
	const u32 rcoef = (( ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
	const u32 runk0 = ram->fN1 << 16;
	const u32 runk1 = ram->fN1;
	struct nvkm_ram_data *next = ram->base.next;
	int vc = !next->bios.ramcfg_11_02_08;
	int mv = !next->bios.ramcfg_11_02_04;
	u32 mask, data;

	ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
	ram_block(fuc);
	ram_wr32(fuc, 0x62c000, 0x0f0f0000);

	if (vc == 1 && ram_have(fuc, gpio2E)) {
		u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]);
		if (temp != ram_rd32(fuc, gpio2E)) {
			ram_wr32(fuc, gpiotrig, 1);
			ram_nsec(fuc, 20000);
		}
	}

	ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
	if (next->bios.ramcfg_11_03_f0)
		ram_mask(fuc, 0x10f808, 0x04000000, 0x04000000);

	ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */

	if (next->bios.ramcfg_DLLoff)
		nvkm_sddr3_dll_disable(fuc);

	ram_wr32(fuc, 0x10f210, 0x00000000); /* REFRESH_AUTO = 0 */
	ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
	ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
	ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
	ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
	ram_nsec(fuc, 1000);

	ram_wr32(fuc, 0x10f090, 0x00000060);
	ram_wr32(fuc, 0x10f090, 0xc000007e);

	/*XXX: there does appear to be some kind of condition here, simply
	 *     modifying these bits in the vbios from the default pl0
	 *     entries shows no change.  however, the data does appear to
	 *     be correct and may be required for the transition back
	 */
	mask = 0x00010000;
	data = 0x00010000;

	if (1) {
		mask |= 0x800807e0;
		data |= 0x800807e0;
		switch (next->bios.ramcfg_11_03_c0) {
		case 3: data &= ~0x00000040; break;
		case 2: data &= ~0x00000100; break;
		case 1: data &= ~0x80000000; break;
		case 0: data &= ~0x00000400; break;
		}

		switch (next->bios.ramcfg_11_03_30) {
		case 3: data &= ~0x00000020; break;
		case 2: data &= ~0x00000080; break;
		case 1: data &= ~0x00080000; break;
		case 0: data &= ~0x00000200; break;
		}
	}

	if (next->bios.ramcfg_11_02_80)
		mask |= 0x03000000;
	if (next->bios.ramcfg_11_02_40)
		mask |= 0x00002000;
	if (next->bios.ramcfg_11_07_10)
		mask |= 0x00004000;
	if (next->bios.ramcfg_11_07_08)
		mask |= 0x00000003;
	else
		mask |= 0x14000000;
	ram_mask(fuc, 0x10f824, mask, data);

	ram_mask(fuc, 0x132040, 0x00010000, 0x00000000);

	ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010010);
	data = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
	data |= next->bios.ramcfg_11_03_30 << 16;
	ram_wr32(fuc, 0x1373ec, data);
	ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000000);
	ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000000);

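	/* note: this duplicates the refpll sequence from r1373f4_init(); the
	 * DDR3 path never reprograms mempll, so only refpll is handled here.
	 */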
	/* (re)program refpll, if required */
	if ((ram_rd32(fuc, 0x132024) & 0xffffffff) != rcoef ||
	    (ram_rd32(fuc, 0x132034) & 0x0000ffff) != runk1) {
		ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
		ram_mask(fuc, 0x132020, 0x00000001, 0x00000000);
		ram_wr32(fuc, 0x137320, 0x00000000);
		ram_mask(fuc, 0x132030, 0xffff0000, runk0);
		ram_mask(fuc, 0x132034, 0x0000ffff, runk1);
		ram_wr32(fuc, 0x132024, rcoef);
		ram_mask(fuc, 0x132028, 0x00080000, 0x00080000);
		ram_mask(fuc, 0x132020, 0x00000001, 0x00000001);
		ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
		ram_mask(fuc, 0x132028, 0x00080000, 0x00000000);
	}

	ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000010);
	ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000001);
	ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);

	if (ram_have(fuc, gpioMV)) {
		u32 temp = ram_mask(fuc, gpioMV, 0x3000, fuc->r_funcMV[mv]);
		if (temp != ram_rd32(fuc, gpioMV)) {
			ram_wr32(fuc, gpiotrig, 1);
			ram_nsec(fuc, 64000);
		}
	}

	if (next->bios.ramcfg_11_02_40 ||
	    next->bios.ramcfg_11_07_10) {
		ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
		ram_nsec(fuc, 20000);
	}

	if (ram->mode != 2) /*XXX*/ {
		if (next->bios.ramcfg_11_07_40)
			ram_mask(fuc, 0x10f670, 0x80000000, 0x80000000);
	}

	ram_wr32(fuc, 0x10f65c, 0x00000011 * next->bios.rammap_11_11_0c);
	ram_wr32(fuc, 0x10f6b8, 0x01010101 * next->bios.ramcfg_11_09);
	ram_wr32(fuc, 0x10f6bc, 0x01010101 * next->bios.ramcfg_11_09);

	mask = 0x00010000;
	data = 0x00000000;
	if (!next->bios.ramcfg_11_02_80)
		data |= 0x03000000;
	if (!next->bios.ramcfg_11_02_40)
		data |= 0x00002000;
	if (!next->bios.ramcfg_11_07_10)
		data |= 0x00004000;
	if (!next->bios.ramcfg_11_07_08)
		data |= 0x00000003;
	else
		data |= 0x14000000;
	ram_mask(fuc, 0x10f824, mask, data);
	ram_nsec(fuc, 1000);

	if (next->bios.ramcfg_11_08_01)
		data = 0x00100000;
	else
		data = 0x00000000;
	ram_mask(fuc, 0x10f82c, 0x00100000, data);

	/* PFB timing */
	ram_mask(fuc, 0x10f248, 0xffffffff, next->bios.timing[10]);
	ram_mask(fuc, 0x10f290, 0xffffffff, next->bios.timing[0]);
	ram_mask(fuc, 0x10f294, 0xffffffff, next->bios.timing[1]);
	ram_mask(fuc, 0x10f298, 0xffffffff, next->bios.timing[2]);
	ram_mask(fuc, 0x10f29c, 0xffffffff, next->bios.timing[3]);
	ram_mask(fuc, 0x10f2a0, 0xffffffff, next->bios.timing[4]);
	ram_mask(fuc, 0x10f2a4, 0xffffffff, next->bios.timing[5]);
	ram_mask(fuc, 0x10f2a8, 0xffffffff, next->bios.timing[6]);
	ram_mask(fuc, 0x10f2ac, 0xffffffff, next->bios.timing[7]);
	ram_mask(fuc, 0x10f2cc, 0xffffffff, next->bios.timing[8]);
	ram_mask(fuc, 0x10f2e8, 0xffffffff, next->bios.timing[9]);

	mask = 0x33f00000;
	data = 0x00000000;
	if (!next->bios.ramcfg_11_01_04)
		data |= 0x20200000;
	if (!next->bios.ramcfg_11_07_80)
		data |= 0x12800000;
	/*XXX: see note above about there probably being some condition
	 *     for the 10f824 stuff that uses ramcfg 3...
	 */
	if (next->bios.ramcfg_11_03_f0) {
		if (next->bios.rammap_11_08_0c) {
			if (!next->bios.ramcfg_11_07_80)
				mask |= 0x00000020;
			else
				data |= 0x00000020;
			mask |= 0x08000004;
		}
		data |= 0x04000000;
	} else {
		mask |= 0x44000020;
		data |= 0x08000004;
	}

	ram_mask(fuc, 0x10f808, mask, data);

	ram_wr32(fuc, 0x10f870, 0x11111111 * next->bios.ramcfg_11_03_0f);

	ram_mask(fuc, 0x10f250, 0x000003f0, next->bios.timing_20_2c_003f << 4);

	data = (next->bios.timing[10] & 0x7f000000) >> 24;
	if (data < next->bios.timing_20_2c_1fc0)
		data = next->bios.timing_20_2c_1fc0;
	ram_mask(fuc, 0x10f24c, 0x7f000000, data << 24);

	ram_mask(fuc, 0x10f224, 0x001f0000, next->bios.timing_20_30_f8 << 16);

	ram_wr32(fuc, 0x10f090, 0x4000007f);
	ram_nsec(fuc, 1000);

	ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
	ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
	ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */
	ram_nsec(fuc, 1000);

	if (!next->bios.ramcfg_DLLoff) {
		ram_mask(fuc, mr[1], 0x1, 0x0);
		nvkm_sddr3_dll_reset(fuc);
	}

	ram_mask(fuc, mr[2], 0x00000fff, ram->base.mr[2]);
	ram_mask(fuc, mr[1], 0xffffffff, ram->base.mr[1]);
	ram_wr32(fuc, mr[0], ram->base.mr[0]);
	ram_nsec(fuc, 1000);

	if (!next->bios.ramcfg_DLLoff) {
		nvkm_sddr3_dll_reset(fuc);
		ram_nsec(fuc, 1000);
	}

	if (vc == 0 && ram_have(fuc, gpio2E)) {
		u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[0]);
		if (temp != ram_rd32(fuc, gpio2E)) {
			ram_wr32(fuc, gpiotrig, 1);
			ram_nsec(fuc, 20000);
		}
	}

	if (ram->mode != 2) {
		ram_mask(fuc, 0x10f830, 0x01000000, 0x01000000);
		ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
	}

	ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
	ram_wr32(fuc, 0x10f318, 0x00000001); /* NOP? */
	ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
	ram_nsec(fuc, 1000);

	ram_unblock(fuc);
	ram_wr32(fuc, 0x62c000, 0x0f0f0f00);

	if (next->bios.rammap_11_08_01)
		data = 0x00000800;
	else
		data = 0x00000000;
	ram_mask(fuc, 0x10f200, 0x00000800, data);
	return 0;
}

/*******************************************************************************
 * main hooks
 ******************************************************************************/

static int
gk104_ram_calc_data(struct gk104_ram *ram, u32 khz, struct nvkm_ram_data *data)
{
	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
	struct nvkm_ram_data *cfg;
	u32 mhz = khz / 1000;

	list_for_each_entry(cfg, &ram->cfg, head) {
		if (mhz >= cfg->bios.rammap_min &&
		    mhz <= cfg->bios.rammap_max) {
			*data = *cfg;
			data->freq = khz;
			return 0;
		}
	}

	nvkm_error(subdev, "ramcfg data for %dMHz not found\n", mhz);
	return -EINVAL;
}

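/* fN is a fractional N contribution scaled by 1/8192 after a +4096 bias, so
 * fN == 0 adds clk/2 and fN == 0xf000 (which the u16 cast wraps to 0) adds
 * nothing.  e.g. with a 27000 kHz crystal, N = 37, M = 1, P = 7, fN = 0:
 * (27000 * 37 + 13500) / 7 = 144642 kHz.
 */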
static int
gk104_calc_pll_output(int fN, int M, int N, int P, int clk)
{
	return ((clk * N) + (((u16)(fN + 4096) * clk) >> 13)) / (M * P);
}

static int
gk104_pll_calc_hiclk(int target_khz, int crystal,
		     int *N1, int *fN1, int *M1, int *P1,
		     int *N2, int *M2, int *P2)
{
	int best_clk = 0, best_err = target_khz, p_ref, n_ref;
	bool upper = false;

	*M1 = 1;
	/* M has to be 1, otherwise it gets unstable */
	*M2 = 1;
	/* can be 1 or 2, sticking with 1 for simplicity */
	*P2 = 1;

	for (p_ref = 0x7; p_ref >= 0x5; --p_ref) {
		for (n_ref = 0x25; n_ref <= 0x2b; ++n_ref) {
			int cur_N, cur_clk, cur_err;

			cur_clk = gk104_calc_pll_output(0, 1, n_ref, p_ref, crystal);
			cur_N = target_khz / cur_clk;
			cur_err = target_khz
				- gk104_calc_pll_output(0xf000, 1, cur_N, 1, cur_clk);

			/* we found a better combination */
			if (cur_err < best_err) {
				best_err = cur_err;
				best_clk = cur_clk;
				*N2 = cur_N;
				*N1 = n_ref;
				*P1 = p_ref;
				upper = false;
			}

			cur_N += 1;
			cur_err = gk104_calc_pll_output(0xf000, 1, cur_N, 1, cur_clk)
				- target_khz;
			if (cur_err < best_err) {
				best_err = cur_err;
				best_clk = cur_clk;
				*N2 = cur_N;
				*N1 = n_ref;
				*P1 = p_ref;
				upper = true;
			}
		}
	}

	/* adjust fN to get closer to the target clock */
	*fN1 = (u16)((((best_err / *N2 * *P2) * (*P1 * *M1)) << 13) / crystal);
	if (upper)
		*fN1 = (u16)(1 - *fN1);

	return gk104_calc_pll_output(*fN1, 1, *N1, *P1, crystal);
}

static int
gk104_ram_calc_xits(struct gk104_ram *ram, struct nvkm_ram_data *next)
{
	struct gk104_ramfuc *fuc = &ram->fuc;
	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
	int refclk, i;
	int ret;

	ret = ram_init(fuc, ram->base.fb);
	if (ret)
		return ret;

	ram->mode = (next->freq > fuc->refpll.vco1.max_freq) ? 2 : 1;
	ram->from = ram_rd32(fuc, 0x1373f4) & 0x0000000f;

	/* XXX: this is *not* what nvidia do.  on fermi nvidia generally
	 * select, based on some unknown condition, one of the two possible
	 * reference frequencies listed in the vbios table for mempll and
	 * program refpll to that frequency.
	 *
	 * so far, i've seen very weird values being chosen by nvidia on
	 * kepler boards, no idea how/why they're chosen.
	 */
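	/* mode 2 (mempll) derives its refpll target from gk104_pll_calc_hiclk()
	 * below; otherwise refpll is simply programmed to the target memory
	 * frequency via gt215_pll_calc().
	 */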
	refclk = next->freq;
	if (ram->mode == 2) {
		ret = gk104_pll_calc_hiclk(next->freq, subdev->device->crystal,
					   &ram->N1, &ram->fN1, &ram->M1, &ram->P1,
					   &ram->N2, &ram->M2, &ram->P2);
		fuc->mempll.refclk = ret;
		if (ret <= 0) {
			nvkm_error(subdev, "unable to calc plls\n");
			return -EINVAL;
		}
		nvkm_debug(subdev, "successfully calculated PLLs for clock %i kHz"
				   " (refclock: %i kHz)\n", next->freq, ret);
	} else {
		/* calculate refpll coefficients */
		ret = gt215_pll_calc(subdev, &fuc->refpll, refclk, &ram->N1,
				     &ram->fN1, &ram->M1, &ram->P1);
		fuc->mempll.refclk = ret;
		if (ret <= 0) {
			nvkm_error(subdev, "unable to calc refpll\n");
			return -EINVAL;
		}
	}

	for (i = 0; i < ARRAY_SIZE(fuc->r_mr); i++) {
		if (ram_have(fuc, mr[i]))
			ram->base.mr[i] = ram_rd32(fuc, mr[i]);
	}
	ram->base.freq = next->freq;

	switch (ram->base.type) {
	case NVKM_RAM_TYPE_DDR3:
		ret = nvkm_sddr3_calc(&ram->base);
		if (ret == 0)
			ret = gk104_ram_calc_sddr3(ram, next->freq);
		break;
	case NVKM_RAM_TYPE_GDDR5:
		ret = nvkm_gddr5_calc(&ram->base, ram->pnuts != 0);
		if (ret == 0)
			ret = gk104_ram_calc_gddr5(ram, next->freq);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	return ret;
}

static int
gk104_ram_calc(struct nvkm_ram *base, u32 freq)
{
	struct gk104_ram *ram = gk104_ram(base);
	struct nvkm_clk *clk = ram->base.fb->subdev.device->clk;
	struct nvkm_ram_data *xits = &ram->base.xition;
	struct nvkm_ram_data *copy;
	int ret;

	if (ram->base.next == NULL) {
		ret = gk104_ram_calc_data(ram,
					  nvkm_clk_read(clk, nv_clk_src_mem),
					  &ram->base.former);
		if (ret)
			return ret;

		ret = gk104_ram_calc_data(ram, freq, &ram->base.target);
		if (ret)
			return ret;

		if (ram->base.target.freq < ram->base.former.freq) {
			*xits = ram->base.target;
			copy = &ram->base.former;
		} else {
			*xits = ram->base.former;
			copy = &ram->base.target;
		}

		xits->bios.ramcfg_11_02_04 = copy->bios.ramcfg_11_02_04;
		xits->bios.ramcfg_11_02_03 = copy->bios.ramcfg_11_02_03;
		xits->bios.timing_20_30_07 = copy->bios.timing_20_30_07;

		ram->base.next = &ram->base.target;
		if (memcmp(xits, &ram->base.former, sizeof(xits->bios)))
			ram->base.next = &ram->base.xition;
	} else {
		BUG_ON(ram->base.next != &ram->base.xition);
		ram->base.next = &ram->base.target;
	}

	return gk104_ram_calc_xits(ram, ram->base.next);
}

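/* apply the rammap-level register settings for the entry covering the given
 * frequency; only fields flagged in ram->diff (i.e. fields that actually
 * differ between rammap entries) are written, per the "don't touch"
 * observation in gk104_ram_new().
 */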
static void
gk104_ram_prog_0(struct gk104_ram *ram, u32 freq)
{
	struct nvkm_device *device = ram->base.fb->subdev.device;
	struct nvkm_ram_data *cfg;
	u32 mhz = freq / 1000;
	u32 mask, data;

	list_for_each_entry(cfg, &ram->cfg, head) {
		if (mhz >= cfg->bios.rammap_min &&
		    mhz <= cfg->bios.rammap_max)
			break;
	}

	if (&cfg->head == &ram->cfg)
		return;

	if (mask = 0, data = 0, ram->diff.rammap_11_0a_03fe) {
		data |= cfg->bios.rammap_11_0a_03fe << 12;
		mask |= 0x001ff000;
	}
	if (ram->diff.rammap_11_09_01ff) {
		data |= cfg->bios.rammap_11_09_01ff;
		mask |= 0x000001ff;
	}
	nvkm_mask(device, 0x10f468, mask, data);

	if (mask = 0, data = 0, ram->diff.rammap_11_0a_0400) {
		data |= cfg->bios.rammap_11_0a_0400;
		mask |= 0x00000001;
	}
	nvkm_mask(device, 0x10f420, mask, data);

	if (mask = 0, data = 0, ram->diff.rammap_11_0a_0800) {
		data |= cfg->bios.rammap_11_0a_0800;
		mask |= 0x00000001;
	}
	nvkm_mask(device, 0x10f430, mask, data);

	if (mask = 0, data = 0, ram->diff.rammap_11_0b_01f0) {
		data |= cfg->bios.rammap_11_0b_01f0;
		mask |= 0x0000001f;
	}
	nvkm_mask(device, 0x10f400, mask, data);

	if (mask = 0, data = 0, ram->diff.rammap_11_0b_0200) {
		data |= cfg->bios.rammap_11_0b_0200 << 9;
		mask |= 0x00000200;
	}
	nvkm_mask(device, 0x10f410, mask, data);

	if (mask = 0, data = 0, ram->diff.rammap_11_0d) {
		data |= cfg->bios.rammap_11_0d << 16;
		mask |= 0x00ff0000;
	}
	if (ram->diff.rammap_11_0f) {
		data |= cfg->bios.rammap_11_0f << 8;
		mask |= 0x0000ff00;
	}
	nvkm_mask(device, 0x10f440, mask, data);

	if (mask = 0, data = 0, ram->diff.rammap_11_0e) {
		data |= cfg->bios.rammap_11_0e << 8;
		mask |= 0x0000ff00;
	}
	if (ram->diff.rammap_11_0b_0800) {
		data |= cfg->bios.rammap_11_0b_0800 << 7;
		mask |= 0x00000080;
	}
	if (ram->diff.rammap_11_0b_0400) {
		data |= cfg->bios.rammap_11_0b_0400 << 5;
		mask |= 0x00000020;
	}
	nvkm_mask(device, 0x10f444, mask, data);
}

static int
gk104_ram_prog(struct nvkm_ram *base)
{
	struct gk104_ram *ram = gk104_ram(base);
	struct gk104_ramfuc *fuc = &ram->fuc;
	struct nvkm_device *device = ram->base.fb->subdev.device;
	struct nvkm_ram_data *next = ram->base.next;

	if (!nvkm_boolopt(device->cfgopt, "NvMemExec", true)) {
		ram_exec(fuc, false);
		return (ram->base.next == &ram->base.xition);
	}

	gk104_ram_prog_0(ram, 1000);
	ram_exec(fuc, true);
	gk104_ram_prog_0(ram, next->freq);

	return (ram->base.next == &ram->base.xition);
}

static void
gk104_ram_tidy(struct nvkm_ram *base)
{
	struct gk104_ram *ram = gk104_ram(base);
	ram->base.next = NULL;
	ram_exec(&ram->fuc, false);
}

struct gk104_ram_train {
	u16 mask;
	struct nvbios_M0209S remap;
	struct nvbios_M0209S type00;
	struct nvbios_M0209S type01;
	struct nvbios_M0209S type04;
	struct nvbios_M0209S type06;
	struct nvbios_M0209S type07;
	struct nvbios_M0209S type08;
	struct nvbios_M0209S type09;
};

static int
gk104_ram_train_type(struct nvkm_ram *ram, int i, u8 ramcfg,
		     struct gk104_ram_train *train)
{
	struct nvkm_bios *bios = ram->fb->subdev.device->bios;
	struct nvbios_M0205E M0205E;
	struct nvbios_M0205S M0205S;
	struct nvbios_M0209E M0209E;
	struct nvbios_M0209S *remap = &train->remap;
	struct nvbios_M0209S *value;
	u8 ver, hdr, cnt, len;
	u32 data;

	/* determine type of data for this index */
	if (!(data = nvbios_M0205Ep(bios, i, &ver, &hdr, &cnt, &len, &M0205E)))
		return -ENOENT;

	switch (M0205E.type) {
	case 0x00: value = &train->type00; break;
	case 0x01: value = &train->type01; break;
	case 0x04: value = &train->type04; break;
	case 0x06: value = &train->type06; break;
	case 0x07: value = &train->type07; break;
	case 0x08: value = &train->type08; break;
	case 0x09: value = &train->type09; break;
	default:
		return 0;
	}

	/* training data index determined by ramcfg strap */
	if (!(data = nvbios_M0205Sp(bios, i, ramcfg, &ver, &hdr, &M0205S)))
		return -EINVAL;
	i = M0205S.data;

	/* training data format information */
	if (!(data = nvbios_M0209Ep(bios, i, &ver, &hdr, &cnt, &len, &M0209E)))
		return -EINVAL;

	/* ... and the raw data */
	if (!(data = nvbios_M0209Sp(bios, i, 0, &ver, &hdr, value)))
		return -EINVAL;

	if (M0209E.v02_07 == 2) {
		/* of course! why wouldn't we have a pointer to another entry
		 * in the same table, and use the first one as an array of
		 * remap indices...
		 */
		if (!(data = nvbios_M0209Sp(bios, M0209E.v03, 0, &ver, &hdr,
					    remap)))
			return -EINVAL;

		for (i = 0; i < ARRAY_SIZE(value->data); i++)
			value->data[i] = remap->data[value->data[i]];
	} else
	if (M0209E.v02_07 != 1)
		return -EINVAL;

	train->mask |= 1 << M0205E.type;
	return 0;
}

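/* upload the parsed M0205/M0209 link training data to the hardware.  exact
 * register semantics are unknown; 0x10f968 looks like an index port with
 * 0x10f918/0x10f920/0x10f900 as the associated data ports, written twice
 * (j = 0 and j = 4).
 */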
static int
gk104_ram_train_init_0(struct nvkm_ram *ram, struct gk104_ram_train *train)
{
	struct nvkm_subdev *subdev = &ram->fb->subdev;
	struct nvkm_device *device = subdev->device;
	int i, j;

	if ((train->mask & 0x03d3) != 0x03d3) {
		nvkm_warn(subdev, "missing link training data\n");
		return -EINVAL;
	}

	for (i = 0; i < 0x30; i++) {
		for (j = 0; j < 8; j += 4) {
			nvkm_wr32(device, 0x10f968 + j, 0x00000000 | (i << 8));
			nvkm_wr32(device, 0x10f920 + j, 0x00000000 |
					   train->type08.data[i] << 4 |
					   train->type06.data[i]);
			nvkm_wr32(device, 0x10f918 + j, train->type00.data[i]);
			nvkm_wr32(device, 0x10f920 + j, 0x00000100 |
					   train->type09.data[i] << 4 |
					   train->type07.data[i]);
			nvkm_wr32(device, 0x10f918 + j, train->type01.data[i]);
		}
	}

	for (j = 0; j < 8; j += 4) {
		for (i = 0; i < 0x100; i++) {
			nvkm_wr32(device, 0x10f968 + j, i);
			nvkm_wr32(device, 0x10f900 + j, train->type04.data[i]);
		}
	}

	return 0;
}

static int
gk104_ram_train_init(struct nvkm_ram *ram)
{
	u8 ramcfg = nvbios_ramcfg_index(&ram->fb->subdev);
	struct gk104_ram_train *train;
	int ret, i;

	if (!(train = kzalloc(sizeof(*train), GFP_KERNEL)))
		return -ENOMEM;

	for (i = 0; i < 0x100; i++) {
		ret = gk104_ram_train_type(ram, i, ramcfg, train);
		if (ret && ret != -ENOENT)
			break;
	}

	switch (ram->type) {
	case NVKM_RAM_TYPE_GDDR5:
		ret = gk104_ram_train_init_0(ram, train);
		break;
	default:
		ret = 0;
		break;
	}

	kfree(train);
	return ret;
}

int
gk104_ram_init(struct nvkm_ram *ram)
{
	struct nvkm_subdev *subdev = &ram->fb->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_bios *bios = device->bios;
	u8 ver, hdr, cnt, len, snr, ssz;
	u32 data, save;
	int i;

	/* run a bunch of tables from rammap table.  there's actually
	 * individual pointers for each rammap entry too, but, nvidia
	 * seem to just run the last two entries' scripts early on in
	 * their init, and never again.. we'll just run 'em all once
	 * for now.
	 *
	 * i strongly suspect that each script is for a separate mode
	 * (likely selected by 0x10f65c's lower bits?), and the
	 * binary driver skips the one that's already been setup by
	 * the init tables.
	 */
	data = nvbios_rammapTe(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
	if (!data || hdr < 0x15)
		return -EINVAL;

	cnt = nvbios_rd08(bios, data + 0x14); /* guess at count */
	data = nvbios_rd32(bios, data + 0x10); /* guess u32... */
	save = nvkm_rd32(device, 0x10f65c) & 0x000000f0;
	for (i = 0; i < cnt; i++, data += 4) {
		if (i != save >> 4) {
			nvkm_mask(device, 0x10f65c, 0x000000f0, i << 4);
			nvbios_exec(&(struct nvbios_init) {
				.subdev = subdev,
				.bios = bios,
				.offset = nvbios_rd32(bios, data),
				.execute = 1,
			});
		}
	}
	nvkm_mask(device, 0x10f65c, 0x000000f0, save);
	nvkm_mask(device, 0x10f584, 0x11000000, 0x00000000);
	nvkm_wr32(device, 0x10ecc0, 0xffffffff);
	nvkm_mask(device, 0x10f160, 0x00000010, 0x00000010);

	return gk104_ram_train_init(ram);
}

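/* parse a single rammap entry (plus its ramcfg-strap-specific portion and,
 * if present, the referenced timing set) into a new nvkm_ram_data, append it
 * to ram->cfg, and accumulate into ram->diff which bitfields differ from the
 * previous entry.
 */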
static int
gk104_ram_ctor_data(struct gk104_ram *ram, u8 ramcfg, int i)
{
	struct nvkm_bios *bios = ram->base.fb->subdev.device->bios;
	struct nvkm_ram_data *cfg;
	struct nvbios_ramcfg *d = &ram->diff;
	struct nvbios_ramcfg *p, *n;
	u8 ver, hdr, cnt, len;
	u32 data;
	int ret;

	if (!(cfg = kmalloc(sizeof(*cfg), GFP_KERNEL)))
		return -ENOMEM;
	p = &list_last_entry(&ram->cfg, typeof(*cfg), head)->bios;
	n = &cfg->bios;

	/* memory config data for a range of target frequencies */
	data = nvbios_rammapEp(bios, i, &ver, &hdr, &cnt, &len, &cfg->bios);
	if (ret = -ENOENT, !data)
		goto done;
	if (ret = -ENOSYS, ver != 0x11 || hdr < 0x12)
		goto done;

	/* ... and a portion specific to the attached memory */
	data = nvbios_rammapSp(bios, data, ver, hdr, cnt, len, ramcfg,
			       &ver, &hdr, &cfg->bios);
	if (ret = -EINVAL, !data)
		goto done;
	if (ret = -ENOSYS, ver != 0x11 || hdr < 0x0a)
		goto done;

	/* lookup memory timings, if bios says they're present */
	if (cfg->bios.ramcfg_timing != 0xff) {
		data = nvbios_timingEp(bios, cfg->bios.ramcfg_timing,
				       &ver, &hdr, &cnt, &len,
				       &cfg->bios);
		if (ret = -EINVAL, !data)
			goto done;
		if (ret = -ENOSYS, ver != 0x20 || hdr < 0x33)
			goto done;
	}

	list_add_tail(&cfg->head, &ram->cfg);
	if (ret = 0, i == 0)
		goto done;

	d->rammap_11_0a_03fe |= p->rammap_11_0a_03fe != n->rammap_11_0a_03fe;
	d->rammap_11_09_01ff |= p->rammap_11_09_01ff != n->rammap_11_09_01ff;
	d->rammap_11_0a_0400 |= p->rammap_11_0a_0400 != n->rammap_11_0a_0400;
	d->rammap_11_0a_0800 |= p->rammap_11_0a_0800 != n->rammap_11_0a_0800;
	d->rammap_11_0b_01f0 |= p->rammap_11_0b_01f0 != n->rammap_11_0b_01f0;
	d->rammap_11_0b_0200 |= p->rammap_11_0b_0200 != n->rammap_11_0b_0200;
	d->rammap_11_0d |= p->rammap_11_0d != n->rammap_11_0d;
	d->rammap_11_0f |= p->rammap_11_0f != n->rammap_11_0f;
	d->rammap_11_0e |= p->rammap_11_0e != n->rammap_11_0e;
	d->rammap_11_0b_0800 |= p->rammap_11_0b_0800 != n->rammap_11_0b_0800;
	d->rammap_11_0b_0400 |= p->rammap_11_0b_0400 != n->rammap_11_0b_0400;
	d->ramcfg_11_01_01 |= p->ramcfg_11_01_01 != n->ramcfg_11_01_01;
	d->ramcfg_11_01_02 |= p->ramcfg_11_01_02 != n->ramcfg_11_01_02;
	d->ramcfg_11_01_10 |= p->ramcfg_11_01_10 != n->ramcfg_11_01_10;
	d->ramcfg_11_02_03 |= p->ramcfg_11_02_03 != n->ramcfg_11_02_03;
	d->ramcfg_11_08_20 |= p->ramcfg_11_08_20 != n->ramcfg_11_08_20;
	d->timing_20_30_07 |= p->timing_20_30_07 != n->timing_20_30_07;
done:
	if (ret)
		kfree(cfg);
	return ret;
}

static void *
gk104_ram_dtor(struct nvkm_ram *base)
{
	struct gk104_ram *ram = gk104_ram(base);
	struct nvkm_ram_data *cfg, *tmp;

	list_for_each_entry_safe(cfg, tmp, &ram->cfg, head) {
		kfree(cfg);
	}

	return ram;
}

static const struct nvkm_ram_func
gk104_ram_func = {
	.dtor = gk104_ram_dtor,
	.init = gk104_ram_init,
	.get = gf100_ram_get,
	.put = gf100_ram_put,
	.calc = gk104_ram_calc,
	.prog = gk104_ram_prog,
	.tidy = gk104_ram_tidy,
};

int
gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
	struct nvkm_subdev *subdev = &fb->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_bios *bios = device->bios;
	struct nvkm_gpio *gpio = device->gpio;
	struct dcb_gpio_func func;
	struct gk104_ram *ram;
	int ret, i;
	u8 ramcfg = nvbios_ramcfg_index(subdev);
	u32 tmp;

	if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
		return -ENOMEM;
	*pram = &ram->base;

	ret = gf100_ram_ctor(&gk104_ram_func, fb, 0x022554, &ram->base);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&ram->cfg);

	/* calculate a mask of differently configured memory partitions,
	 * because, of course reclocking wasn't complicated enough
	 * already without having to treat some of them differently to
	 * the others....
	 */
	ram->parts = nvkm_rd32(device, 0x022438);
	ram->pmask = nvkm_rd32(device, 0x022554);
	ram->pnuts = 0;
	for (i = 0, tmp = 0; i < ram->parts; i++) {
		if (!(ram->pmask & (1 << i))) {
			u32 cfg1 = nvkm_rd32(device, 0x110204 + (i * 0x1000));
			if (tmp && tmp != cfg1) {
				ram->pnuts |= (1 << i);
				continue;
			}
			tmp = cfg1;
		}
	}

	/* parse bios data for all rammap table entries up-front, and
	 * build information on whether certain fields differ between
	 * any of the entries.
	 *
	 * the binary driver appears to completely ignore some fields
	 * when all entries contain the same value.  at first, it was
	 * hoped that these were mere optimisations and the bios init
	 * tables had configured as per the values here, but there is
	 * evidence now to suggest that this isn't the case and we do
	 * need to treat this condition as a "don't touch" indicator.
	 */
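	/* walk rammap entries until gk104_ram_ctor_data() runs off the end
	 * of the table (-ENOENT); any other failure is fatal.
	 */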
	for (i = 0; !ret; i++) {
		ret = gk104_ram_ctor_data(ram, ramcfg, i);
		if (ret && ret != -ENOENT) {
			nvkm_error(subdev, "failed to parse ramcfg data\n");
			return ret;
		}
	}

	/* parse bios data for both pll's */
	ret = nvbios_pll_parse(bios, 0x0c, &ram->fuc.refpll);
	if (ret) {
		nvkm_error(subdev, "mclk refpll data not found\n");
		return ret;
	}

	ret = nvbios_pll_parse(bios, 0x04, &ram->fuc.mempll);
	if (ret) {
		nvkm_error(subdev, "mclk pll data not found\n");
		return ret;
	}

	/* lookup memory voltage gpios */
	ret = nvkm_gpio_find(gpio, 0, 0x18, DCB_GPIO_UNUSED, &func);
	if (ret == 0) {
		ram->fuc.r_gpioMV = ramfuc_reg(0x00d610 + (func.line * 0x04));
		ram->fuc.r_funcMV[0] = (func.log[0] ^ 2) << 12;
		ram->fuc.r_funcMV[1] = (func.log[1] ^ 2) << 12;
	}

	ret = nvkm_gpio_find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
	if (ret == 0) {
		ram->fuc.r_gpio2E = ramfuc_reg(0x00d610 + (func.line * 0x04));
		ram->fuc.r_func2E[0] = (func.log[0] ^ 2) << 12;
		ram->fuc.r_func2E[1] = (func.log[1] ^ 2) << 12;
	}

	ram->fuc.r_gpiotrig = ramfuc_reg(0x00d604);

	ram->fuc.r_0x132020 = ramfuc_reg(0x132020);
	ram->fuc.r_0x132028 = ramfuc_reg(0x132028);
	ram->fuc.r_0x132024 = ramfuc_reg(0x132024);
	ram->fuc.r_0x132030 = ramfuc_reg(0x132030);
	ram->fuc.r_0x132034 = ramfuc_reg(0x132034);
	ram->fuc.r_0x132000 = ramfuc_reg(0x132000);
	ram->fuc.r_0x132004 = ramfuc_reg(0x132004);
	ram->fuc.r_0x132040 = ramfuc_reg(0x132040);

	ram->fuc.r_0x10f248 = ramfuc_reg(0x10f248);
	ram->fuc.r_0x10f290 = ramfuc_reg(0x10f290);
	ram->fuc.r_0x10f294 = ramfuc_reg(0x10f294);
	ram->fuc.r_0x10f298 = ramfuc_reg(0x10f298);
	ram->fuc.r_0x10f29c = ramfuc_reg(0x10f29c);
	ram->fuc.r_0x10f2a0 = ramfuc_reg(0x10f2a0);
	ram->fuc.r_0x10f2a4 = ramfuc_reg(0x10f2a4);
	ram->fuc.r_0x10f2a8 = ramfuc_reg(0x10f2a8);
	ram->fuc.r_0x10f2ac = ramfuc_reg(0x10f2ac);
	ram->fuc.r_0x10f2cc = ramfuc_reg(0x10f2cc);
	ram->fuc.r_0x10f2e8 = ramfuc_reg(0x10f2e8);
	ram->fuc.r_0x10f250 = ramfuc_reg(0x10f250);
	ram->fuc.r_0x10f24c = ramfuc_reg(0x10f24c);
	ram->fuc.r_0x10fec4 = ramfuc_reg(0x10fec4);
	ram->fuc.r_0x10fec8 = ramfuc_reg(0x10fec8);
	ram->fuc.r_0x10f604 = ramfuc_reg(0x10f604);
	ram->fuc.r_0x10f614 = ramfuc_reg(0x10f614);
	ram->fuc.r_0x10f610 = ramfuc_reg(0x10f610);
	ram->fuc.r_0x100770 = ramfuc_reg(0x100770);
	ram->fuc.r_0x100778 = ramfuc_reg(0x100778);
	ram->fuc.r_0x10f224 = ramfuc_reg(0x10f224);

	ram->fuc.r_0x10f870 = ramfuc_reg(0x10f870);
	ram->fuc.r_0x10f698 = ramfuc_reg(0x10f698);
	ram->fuc.r_0x10f694 = ramfuc_reg(0x10f694);
	ram->fuc.r_0x10f6b8 = ramfuc_reg(0x10f6b8);
	ram->fuc.r_0x10f808 = ramfuc_reg(0x10f808);
	ram->fuc.r_0x10f670 = ramfuc_reg(0x10f670);
	ram->fuc.r_0x10f60c = ramfuc_reg(0x10f60c);
	ram->fuc.r_0x10f830 = ramfuc_reg(0x10f830);
	ram->fuc.r_0x1373ec = ramfuc_reg(0x1373ec);
	ram->fuc.r_0x10f800 = ramfuc_reg(0x10f800);
	ram->fuc.r_0x10f82c = ramfuc_reg(0x10f82c);

	ram->fuc.r_0x10f978 = ramfuc_reg(0x10f978);
	ram->fuc.r_0x10f910 = ramfuc_reg(0x10f910);
	ram->fuc.r_0x10f914 = ramfuc_reg(0x10f914);

	switch (ram->base.type) {
	case NVKM_RAM_TYPE_GDDR5:
		ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
		ram->fuc.r_mr[1] = ramfuc_reg(0x10f330);
		ram->fuc.r_mr[2] = ramfuc_reg(0x10f334);
		ram->fuc.r_mr[3] = ramfuc_reg(0x10f338);
		ram->fuc.r_mr[4] = ramfuc_reg(0x10f33c);
		ram->fuc.r_mr[5] = ramfuc_reg(0x10f340);
		ram->fuc.r_mr[6] = ramfuc_reg(0x10f344);
		ram->fuc.r_mr[7] = ramfuc_reg(0x10f348);
		ram->fuc.r_mr[8] = ramfuc_reg(0x10f354);
		ram->fuc.r_mr[15] = ramfuc_reg(0x10f34c);
		break;
	case NVKM_RAM_TYPE_DDR3:
		ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
		ram->fuc.r_mr[1] = ramfuc_reg(0x10f304);
		ram->fuc.r_mr[2] = ramfuc_reg(0x10f320);
		break;
	default:
		break;
	}

	ram->fuc.r_0x62c000 = ramfuc_reg(0x62c000);
	ram->fuc.r_0x10f200 = ramfuc_reg(0x10f200);
	ram->fuc.r_0x10f210 = ramfuc_reg(0x10f210);
	ram->fuc.r_0x10f310 = ramfuc_reg(0x10f310);
	ram->fuc.r_0x10f314 = ramfuc_reg(0x10f314);
	ram->fuc.r_0x10f318 = ramfuc_reg(0x10f318);
	ram->fuc.r_0x10f090 = ramfuc_reg(0x10f090);
	ram->fuc.r_0x10f69c = ramfuc_reg(0x10f69c);
	ram->fuc.r_0x10f824 = ramfuc_reg(0x10f824);
	ram->fuc.r_0x1373f0 = ramfuc_reg(0x1373f0);
	ram->fuc.r_0x1373f4 = ramfuc_reg(0x1373f4);
	ram->fuc.r_0x137320 = ramfuc_reg(0x137320);
	ram->fuc.r_0x10f65c = ramfuc_reg(0x10f65c);
	ram->fuc.r_0x10f6bc = ramfuc_reg(0x10f6bc);
	ram->fuc.r_0x100710 = ramfuc_reg(0x100710);
	ram->fuc.r_0x100750 = ramfuc_reg(0x100750);
	return 0;
}