/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gf100.h"
#include "ramfuc.h"

#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/bios/rammap.h>
#include <subdev/bios/timing.h>
#include <subdev/clk.h>
#include <subdev/clk/pll.h>
#include <subdev/ltc.h>

struct gf100_ramfuc {
	struct ramfuc base;

	struct ramfuc_reg r_0x10fe20;
	struct ramfuc_reg r_0x10fe24;
	struct ramfuc_reg r_0x137320;
	struct ramfuc_reg r_0x137330;

	struct ramfuc_reg r_0x132000;
	struct ramfuc_reg r_0x132004;
	struct ramfuc_reg r_0x132100;

	struct ramfuc_reg r_0x137390;

	struct ramfuc_reg r_0x10f290;
	struct ramfuc_reg r_0x10f294;
	struct ramfuc_reg r_0x10f298;
	struct ramfuc_reg r_0x10f29c;
	struct ramfuc_reg r_0x10f2a0;

	struct ramfuc_reg r_0x10f300;
	struct ramfuc_reg r_0x10f338;
	struct ramfuc_reg r_0x10f340;
	struct ramfuc_reg r_0x10f344;
	struct ramfuc_reg r_0x10f348;

	struct ramfuc_reg r_0x10f910;
	struct ramfuc_reg r_0x10f914;

	struct ramfuc_reg r_0x100b0c;
	struct ramfuc_reg r_0x10f050;
	struct ramfuc_reg r_0x10f090;
	struct ramfuc_reg r_0x10f200;
	struct ramfuc_reg r_0x10f210;
	struct ramfuc_reg r_0x10f310;
	struct ramfuc_reg r_0x10f314;
	struct ramfuc_reg r_0x10f610;
	struct ramfuc_reg r_0x10f614;
	struct ramfuc_reg r_0x10f800;
	struct ramfuc_reg r_0x10f808;
	struct ramfuc_reg r_0x10f824;
	struct ramfuc_reg r_0x10f830;
	struct ramfuc_reg r_0x10f988;
	struct ramfuc_reg r_0x10f98c;
	struct ramfuc_reg r_0x10f990;
	struct ramfuc_reg r_0x10f998;
	struct ramfuc_reg r_0x10f9b0;
	struct ramfuc_reg r_0x10f9b4;
	struct ramfuc_reg r_0x10fb04;
	struct ramfuc_reg r_0x10fb08;
	struct ramfuc_reg r_0x137300;
	struct ramfuc_reg r_0x137310;
	struct ramfuc_reg r_0x137360;
	struct ramfuc_reg r_0x1373ec;
	struct ramfuc_reg r_0x1373f0;
	struct ramfuc_reg r_0x1373f8;

	struct ramfuc_reg r_0x61c140;
	struct ramfuc_reg r_0x611200;

	struct ramfuc_reg r_0x13d8f4;
};

struct gf100_ram {
	struct nvkm_ram base;
	struct gf100_ramfuc fuc;
	struct nvbios_pll refpll;
	struct nvbios_pll mempll;
};
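
/*
 * Memory training trigger: write the pattern selector to 0x10f910/0x10f914
 * and, when bit 31 of the value is set, poll each enabled partition's
 * status register (0x110974 + part * 0x1000) until its low nibble clears.
 * The partition count comes from 0x022438, the disable mask from 0x022554.
 */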

static void
gf100_ram_train(struct gf100_ramfuc *fuc, u32 magic)
{
	struct gf100_ram *ram = container_of(fuc, typeof(*ram), fuc);
	struct nvkm_fb *fb = nvkm_fb(ram);
	struct nvkm_device *device = fb->subdev.device;
	u32 part = nvkm_rd32(device, 0x022438), i;
	u32 mask = nvkm_rd32(device, 0x022554);
	u32 addr = 0x110974;

	ram_wr32(fuc, 0x10f910, magic);
	ram_wr32(fuc, 0x10f914, magic);

	for (i = 0; (magic & 0x80000000) && i < part; addr += 0x1000, i++) {
		if (mask & (1 << i))
			continue;
		ram_wait(fuc, addr, 0x0000000f, 0x00000000, 500000);
	}
}

static int
gf100_ram_calc(struct nvkm_fb *fb, u32 freq)
{
	struct nvkm_subdev *subdev = &fb->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_clk *clk = device->clk;
	struct nvkm_bios *bios = device->bios;
	struct gf100_ram *ram = (void *)fb->ram;
	struct gf100_ramfuc *fuc = &ram->fuc;
	struct nvbios_ramcfg cfg;
	u8 ver, cnt, len, strap;
	struct {
		u32 data;
		u8 size;
	} rammap, ramcfg, timing;
	int ref, div, out;
	int from, mode;
	int N1, M1, P;
	int ret;

	/* lookup memory config data relevant to the target frequency */
	rammap.data = nvbios_rammapEm(bios, freq / 1000, &ver, &rammap.size,
				      &cnt, &ramcfg.size, &cfg);
	if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
		nvkm_error(subdev, "invalid/missing rammap entry\n");
		return -EINVAL;
	}

	/* locate specific data set for the attached memory */
	strap = nvbios_ramcfg_index(nv_subdev(fb));
	if (strap >= cnt) {
		nvkm_error(subdev, "invalid ramcfg strap\n");
		return -EINVAL;
	}

	ramcfg.data = rammap.data + rammap.size + (strap * ramcfg.size);
	if (!ramcfg.data || ver != 0x10 || ramcfg.size < 0x0e) {
		nvkm_error(subdev, "invalid/missing ramcfg entry\n");
		return -EINVAL;
	}

	/* lookup memory timings, if bios says they're present */
	strap = nv_ro08(bios, ramcfg.data + 0x01);
	if (strap != 0xff) {
		timing.data = nvbios_timingEe(bios, strap, &ver, &timing.size,
					      &cnt, &len);
		if (!timing.data || ver != 0x10 || timing.size < 0x19) {
			nvkm_error(subdev, "invalid/missing timing entry\n");
			return -EINVAL;
		}
	} else {
		timing.data = 0;
	}

	ret = ram_init(fuc, fb);
	if (ret)
		return ret;

	/* determine current mclk configuration */
	from = !!(ram_rd32(fuc, 0x1373f0) & 0x00000002); /*XXX: ok? */

	/* determine target mclk configuration */
	if (!(ram_rd32(fuc, 0x137300) & 0x00000100))
		ref = clk->read(clk, nv_clk_src_sppll0);
	else
		ref = clk->read(clk, nv_clk_src_sppll1);
	div = max(min((ref * 2) / freq, (u32)65), (u32)2) - 2;
	out = (ref * 2) / (div + 2);
	mode = freq != out;
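
	/*
	 * Note (illustrative): "out" is the closest the divider path can get
	 * with the selected sppll source, out = (ref * 2) / (div + 2) with
	 * div + 2 clamped to [2, 65].  With, say, ref = 405000 kHz, a
	 * 270000 kHz target divides exactly (810000 / 3) and mode stays 0;
	 * a target the divider cannot hit exactly sets mode = 1, selecting
	 * the refpll/mempll reprogramming below.
	 */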

	ram_mask(fuc, 0x137360, 0x00000002, 0x00000000);

	if ((ram_rd32(fuc, 0x132000) & 0x00000002) || 0 /*XXX*/) {
		ram_nuke(fuc, 0x132000);
		ram_mask(fuc, 0x132000, 0x00000002, 0x00000002);
		ram_mask(fuc, 0x132000, 0x00000002, 0x00000000);
	}

	if (mode == 1) {
		ram_nuke(fuc, 0x10fe20);
		ram_mask(fuc, 0x10fe20, 0x00000002, 0x00000002);
		ram_mask(fuc, 0x10fe20, 0x00000002, 0x00000000);
	}

	// 0x00020034 // 0x0000000a
	ram_wr32(fuc, 0x132100, 0x00000001);

	if (mode == 1 && from == 0) {
		/* calculate refpll */
		ret = gt215_pll_calc(nv_subdev(fb), &ram->refpll,
				     ram->mempll.refclk, &N1, NULL, &M1, &P);
		if (ret <= 0) {
			nvkm_error(subdev, "unable to calc refpll\n");
			return ret ? ret : -ERANGE;
		}

		ram_wr32(fuc, 0x10fe20, 0x20010000);
		ram_wr32(fuc, 0x137320, 0x00000003);
		ram_wr32(fuc, 0x137330, 0x81200006);
		ram_wr32(fuc, 0x10fe24, (P << 16) | (N1 << 8) | M1);
		ram_wr32(fuc, 0x10fe20, 0x20010001);
		ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);

		/* calculate mempll */
		ret = gt215_pll_calc(nv_subdev(fb), &ram->mempll, freq,
				     &N1, NULL, &M1, &P);
		if (ret <= 0) {
			nvkm_error(subdev, "unable to calc mempll\n");
			return ret ? ret : -ERANGE;
		}

		ram_wr32(fuc, 0x10fe20, 0x20010005);
		ram_wr32(fuc, 0x132004, (P << 16) | (N1 << 8) | M1);
		ram_wr32(fuc, 0x132000, 0x18010101);
		ram_wait(fuc, 0x137390, 0x00000002, 0x00000002, 64000);
	} else
	if (mode == 0) {
		ram_wr32(fuc, 0x137300, 0x00000003);
	}
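
	/*
	 * Both PLL coefficient registers written above (0x10fe24 for the
	 * refpll, 0x132004 for the mempll) take the gt215_pll_calc() result
	 * packed as (P << 16) | (N << 8) | M, computed against the VBIOS
	 * PLL limits parsed in gf100_ram_ctor().
	 */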

	if (from == 0) {
		ram_nuke(fuc, 0x10fb04);
		ram_mask(fuc, 0x10fb04, 0x0000ffff, 0x00000000);
		ram_nuke(fuc, 0x10fb08);
		ram_mask(fuc, 0x10fb08, 0x0000ffff, 0x00000000);
		ram_wr32(fuc, 0x10f988, 0x2004ff00);
		ram_wr32(fuc, 0x10f98c, 0x003fc040);
		ram_wr32(fuc, 0x10f990, 0x20012001);
		ram_wr32(fuc, 0x10f998, 0x00011a00);
		ram_wr32(fuc, 0x13d8f4, 0x00000000);
	} else {
		ram_wr32(fuc, 0x10f988, 0x20010000);
		ram_wr32(fuc, 0x10f98c, 0x00000000);
		ram_wr32(fuc, 0x10f990, 0x20012001);
		ram_wr32(fuc, 0x10f998, 0x00010a00);
	}

	if (from == 0) {
		// 0x00020039 // 0x000000ba
	}

	// 0x0002003a // 0x00000002
	ram_wr32(fuc, 0x100b0c, 0x00080012);
	// 0x00030014 // 0x00000000 // 0x02b5f070
	// 0x00030014 // 0x00010000 // 0x02b5f070
	ram_wr32(fuc, 0x611200, 0x00003300);
	// 0x00020034 // 0x0000000a
	// 0x00030020 // 0x00000001 // 0x00000000

	ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
	ram_wr32(fuc, 0x10f210, 0x00000000);
	ram_nsec(fuc, 1000);
	if (mode == 0)
		gf100_ram_train(fuc, 0x000c1001);
	ram_wr32(fuc, 0x10f310, 0x00000001);
	ram_nsec(fuc, 1000);
	ram_wr32(fuc, 0x10f090, 0x00000061);
	ram_wr32(fuc, 0x10f090, 0xc000007f);
	ram_nsec(fuc, 1000);

	if (from == 0) {
		ram_wr32(fuc, 0x10f824, 0x00007fd4);
	} else {
		ram_wr32(fuc, 0x1373ec, 0x00020404);
	}

	if (mode == 0) {
		ram_mask(fuc, 0x10f808, 0x00080000, 0x00000000);
		ram_mask(fuc, 0x10f200, 0x00008000, 0x00008000);
		ram_wr32(fuc, 0x10f830, 0x41500010);
		ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
		ram_mask(fuc, 0x132100, 0x00000100, 0x00000100);
		ram_wr32(fuc, 0x10f050, 0xff000090);
		ram_wr32(fuc, 0x1373ec, 0x00020f0f);
		ram_wr32(fuc, 0x1373f0, 0x00000003);
		ram_wr32(fuc, 0x137310, 0x81201616);
		ram_wr32(fuc, 0x132100, 0x00000001);
		// 0x00020039 // 0x000000ba
		ram_wr32(fuc, 0x10f830, 0x00300017);
		ram_wr32(fuc, 0x1373f0, 0x00000001);
		ram_wr32(fuc, 0x10f824, 0x00007e77);
		ram_wr32(fuc, 0x132000, 0x18030001);
		ram_wr32(fuc, 0x10f090, 0x4000007e);
		ram_nsec(fuc, 2000);
		ram_wr32(fuc, 0x10f314, 0x00000001);
		ram_wr32(fuc, 0x10f210, 0x80000000);
		ram_wr32(fuc, 0x10f338, 0x00300220);
		ram_wr32(fuc, 0x10f300, 0x0000011d);
		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f290, 0x02060505);
		ram_wr32(fuc, 0x10f294, 0x34208288);
		ram_wr32(fuc, 0x10f298, 0x44050411);
		ram_wr32(fuc, 0x10f29c, 0x0000114c);
		ram_wr32(fuc, 0x10f2a0, 0x42e10069);
		ram_wr32(fuc, 0x10f614, 0x40044f77);
		ram_wr32(fuc, 0x10f610, 0x40044f77);
		ram_wr32(fuc, 0x10f344, 0x00600009);
		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f348, 0x00700008);
		ram_wr32(fuc, 0x61c140, 0x19240000);
		ram_wr32(fuc, 0x10f830, 0x00300017);
		gf100_ram_train(fuc, 0x80021001);
		gf100_ram_train(fuc, 0x80081001);
		ram_wr32(fuc, 0x10f340, 0x00500004);
		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f830, 0x01300017);
		ram_wr32(fuc, 0x10f830, 0x00300017);
		// 0x00030020 // 0x00000000 // 0x00000000
		// 0x00020034 // 0x0000000b
		ram_wr32(fuc, 0x100b0c, 0x00080028);
		ram_wr32(fuc, 0x611200, 0x00003330);
	} else {
		ram_wr32(fuc, 0x10f800, 0x00001800);
		ram_wr32(fuc, 0x13d8f4, 0x00000000);
		ram_wr32(fuc, 0x1373ec, 0x00020404);
		ram_wr32(fuc, 0x1373f0, 0x00000003);
		ram_wr32(fuc, 0x10f830, 0x40700010);
		ram_wr32(fuc, 0x10f830, 0x40500010);
		ram_wr32(fuc, 0x13d8f4, 0x00000000);
		ram_wr32(fuc, 0x1373f8, 0x00000000);
		ram_wr32(fuc, 0x132100, 0x00000101);
		ram_wr32(fuc, 0x137310, 0x89201616);
		ram_wr32(fuc, 0x10f050, 0xff000090);
		ram_wr32(fuc, 0x1373ec, 0x00030404);
		ram_wr32(fuc, 0x1373f0, 0x00000002);
		// 0x00020039 // 0x00000011
		ram_wr32(fuc, 0x132100, 0x00000001);
		ram_wr32(fuc, 0x1373f8, 0x00002000);
		ram_nsec(fuc, 2000);
		ram_wr32(fuc, 0x10f808, 0x7aaa0050);
		ram_wr32(fuc, 0x10f830, 0x00500010);
		ram_wr32(fuc, 0x10f200, 0x00ce1000);
		ram_wr32(fuc, 0x10f090, 0x4000007e);
		ram_nsec(fuc, 2000);
		ram_wr32(fuc, 0x10f314, 0x00000001);
		ram_wr32(fuc, 0x10f210, 0x80000000);
		ram_wr32(fuc, 0x10f338, 0x00300200);
		ram_wr32(fuc, 0x10f300, 0x0000084d);
		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f290, 0x0b343825);
		ram_wr32(fuc, 0x10f294, 0x3483028e);
		ram_wr32(fuc, 0x10f298, 0x440c0600);
		ram_wr32(fuc, 0x10f29c, 0x0000214c);
		ram_wr32(fuc, 0x10f2a0, 0x42e20069);
		ram_wr32(fuc, 0x10f200, 0x00ce0000);
		ram_wr32(fuc, 0x10f614, 0x60044e77);
		ram_wr32(fuc, 0x10f610, 0x60044e77);
		ram_wr32(fuc, 0x10f340, 0x00500000);
		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f344, 0x00600228);
		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f348, 0x00700000);
		ram_wr32(fuc, 0x13d8f4, 0x00000000);
		ram_wr32(fuc, 0x61c140, 0x09a40000);

		gf100_ram_train(fuc, 0x800e1008);

		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f800, 0x00001804);
		// 0x00030020 // 0x00000000 // 0x00000000
		// 0x00020034 // 0x0000000b
		ram_wr32(fuc, 0x13d8f4, 0x00000000);
		ram_wr32(fuc, 0x100b0c, 0x00080028);
		ram_wr32(fuc, 0x611200, 0x00003330);
		ram_nsec(fuc, 100000);
		ram_wr32(fuc, 0x10f9b0, 0x05313f41);
		ram_wr32(fuc, 0x10f9b4, 0x00002f50);

		gf100_ram_train(fuc, 0x010c1001);
	}

	ram_mask(fuc, 0x10f200, 0x00000800, 0x00000800);
	// 0x00020016 // 0x00000000

	if (mode == 0)
		ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);

	return 0;
}

static int
gf100_ram_prog(struct nvkm_fb *fb)
{
	struct nvkm_device *device = nv_device(fb);
	struct gf100_ram *ram = (void *)fb->ram;
	struct gf100_ramfuc *fuc = &ram->fuc;
	ram_exec(fuc, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
	return 0;
}

static void
gf100_ram_tidy(struct nvkm_fb *fb)
{
	struct gf100_ram *ram = (void *)fb->ram;
	struct gf100_ramfuc *fuc = &ram->fuc;
	ram_exec(fuc, false);
}

extern const u8 gf100_pte_storage_type_map[256];
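
/*
 * VRAM allocation helpers.  gf100_ram_get() carves an allocation out of
 * fb->vram in 4KiB units (size/align/ncmin are shifted down by 12); when
 * the requested storage type supports compression and the allocation is
 * aligned to large (128KiB) pages, it also tries to reserve LTC comptags,
 * falling back to the uncompressed storage type if none are available.
 * gf100_ram_put() releases the comptags and the region list again.
 */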

void
gf100_ram_put(struct nvkm_fb *fb, struct nvkm_mem **pmem)
{
	struct nvkm_ltc *ltc = nvkm_ltc(fb);
	struct nvkm_mem *mem = *pmem;

	*pmem = NULL;
	if (unlikely(mem == NULL))
		return;

	mutex_lock(&fb->subdev.mutex);
	if (mem->tag)
		ltc->tags_free(ltc, &mem->tag);
	__nv50_ram_put(fb, mem);
	mutex_unlock(&fb->subdev.mutex);

	kfree(mem);
}

int
gf100_ram_get(struct nvkm_fb *fb, u64 size, u32 align, u32 ncmin,
	      u32 memtype, struct nvkm_mem **pmem)
{
	struct nvkm_mm *mm = &fb->vram;
	struct nvkm_mm_node *r;
	struct nvkm_mem *mem;
	int type = (memtype & 0x0ff);
	int back = (memtype & 0x800);
	const bool comp = gf100_pte_storage_type_map[type] != type;
	int ret;

	size >>= 12;
	align >>= 12;
	ncmin >>= 12;
	if (!ncmin)
		ncmin = size;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	INIT_LIST_HEAD(&mem->regions);
	mem->size = size;

	mutex_lock(&fb->subdev.mutex);
	if (comp) {
		struct nvkm_ltc *ltc = nvkm_ltc(fb);

		/* compression only works with lpages */
		if (align == (1 << (17 - 12))) {
			int n = size >> 5;
			ltc->tags_alloc(ltc, n, &mem->tag);
		}

		if (unlikely(!mem->tag))
			type = gf100_pte_storage_type_map[type];
	}
	mem->memtype = type;

	do {
		if (back)
			ret = nvkm_mm_tail(mm, 0, 1, size, ncmin, align, &r);
		else
			ret = nvkm_mm_head(mm, 0, 1, size, ncmin, align, &r);
		if (ret) {
			mutex_unlock(&fb->subdev.mutex);
			fb->ram->put(fb, &mem);
			return ret;
		}

		list_add_tail(&r->rl_entry, &mem->regions);
		size -= r->length;
	} while (size);
	mutex_unlock(&fb->subdev.mutex);

	r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
	mem->offset = (u64)r->offset << 12;
	*pmem = mem;
	return 0;
}
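
/*
 * VRAM probing.  Everything below works in 4KiB pages: rsvd_head/rsvd_tail
 * reserve the VGA area and the VBIOS image, and the per-partition sizes
 * read from 0x11020c are in MiB, so "bsize << 8" converts MiB to pages.
 * When the partitions are not uniformly populated, only the lowest common
 * amount is addressed linearly from 0 and the remainder is mapped in a
 * second window starting at 8GiB + common_size.
 */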

int
gf100_ram_create_(struct nvkm_object *parent, struct nvkm_object *engine,
		  struct nvkm_oclass *oclass, u32 maskaddr, int size,
		  void **pobject)
{
	struct nvkm_fb *fb = nvkm_fb(parent);
	struct nvkm_subdev *subdev = &fb->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_bios *bios = device->bios;
	struct nvkm_ram *ram;
	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
	u32 parts = nvkm_rd32(device, 0x022438);
	u32 pmask = nvkm_rd32(device, maskaddr);
	u32 bsize = nvkm_rd32(device, 0x10f20c);
	u32 offset, length;
	bool uniform = true;
	int ret, part;

	ret = nvkm_ram_create_(parent, engine, oclass, size, pobject);
	ram = *pobject;
	if (ret)
		return ret;

	nvkm_debug(subdev, "100800: %08x\n", nvkm_rd32(device, 0x100800));
	nvkm_debug(subdev, "parts %08x mask %08x\n", parts, pmask);

	ram->type = nvkm_fb_bios_memtype(bios);
	ram->ranks = (nvkm_rd32(device, 0x10f200) & 0x00000004) ? 2 : 1;

	/* read amount of vram attached to each memory controller */
	for (part = 0; part < parts; part++) {
		if (!(pmask & (1 << part))) {
			u32 size = nvkm_rd32(device, 0x11020c + (part * 0x1000));
			if (size != bsize) {
				if (size < bsize)
					bsize = size;
				uniform = false;
			}

			nvkm_debug(subdev, "%d: size %08x\n", part, size);
			ram->size += (u64)size << 20;
		}
	}

	/* if all controllers have the same amount attached, there's no holes */
	if (uniform) {
		offset = rsvd_head;
		length = (ram->size >> 12) - rsvd_head - rsvd_tail;
		ret = nvkm_mm_init(&fb->vram, offset, length, 1);
	} else {
		/* otherwise, address lowest common amount from 0GiB */
		ret = nvkm_mm_init(&fb->vram, rsvd_head,
				   (bsize << 8) * parts - rsvd_head, 1);
		if (ret)
			return ret;

		/* and the rest starting from (8GiB + common_size) */
		offset = (0x0200000000ULL >> 12) + (bsize << 8);
		length = (ram->size >> 12) - ((bsize * parts) << 8) - rsvd_tail;

		ret = nvkm_mm_init(&fb->vram, offset, length, 1);
		if (ret)
			nvkm_mm_fini(&fb->vram);
	}

	if (ret)
		return ret;

	ram->get = gf100_ram_get;
	ram->put = gf100_ram_put;
	return 0;
}

static int
gf100_ram_init(struct nvkm_object *object)
{
	struct nvkm_fb *fb = (void *)object->parent;
	struct nvkm_device *device = fb->subdev.device;
	struct gf100_ram *ram = (void *)object;
	int ret, i;

	ret = nvkm_ram_init(&ram->base);
	if (ret)
		return ret;

	/* prepare for ddr link training, and load training patterns */
	switch (ram->base.type) {
	case NV_MEM_TYPE_GDDR5: {
		static const u8 train0[] = {
			0x00, 0xff, 0x55, 0xaa, 0x33, 0xcc,
			0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
		};
		static const u32 train1[] = {
			0x00000000, 0xffffffff,
			0x55555555, 0xaaaaaaaa,
			0x33333333, 0xcccccccc,
			0xf0f0f0f0, 0x0f0f0f0f,
			0x00ff00ff, 0xff00ff00,
			0x0000ffff, 0xffff0000,
		};

		for (i = 0; i < 0x30; i++) {
			nvkm_wr32(device, 0x10f968, 0x00000000 | (i << 8));
			nvkm_wr32(device, 0x10f96c, 0x00000000 | (i << 8));
			nvkm_wr32(device, 0x10f920, 0x00000100 | train0[i % 12]);
			nvkm_wr32(device, 0x10f924, 0x00000100 | train0[i % 12]);
			nvkm_wr32(device, 0x10f918, train1[i % 12]);
			nvkm_wr32(device, 0x10f91c, train1[i % 12]);
			nvkm_wr32(device, 0x10f920, 0x00000000 | train0[i % 12]);
			nvkm_wr32(device, 0x10f924, 0x00000000 | train0[i % 12]);
			nvkm_wr32(device, 0x10f918, train1[i % 12]);
			nvkm_wr32(device, 0x10f91c, train1[i % 12]);
		}
	} break;
	default:
		break;
	}

	return 0;
}
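
/*
 * Constructor: probes the VRAM layout via gf100_ram_create_(), parses the
 * refclk (type 0x0c) and memory (type 0x04) PLL entries from the VBIOS
 * PLL table, and hooks up the reclocking methods only for GDDR5 boards;
 * other memory types keep the default (non-reclockable) behaviour.
 */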

static int
gf100_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nvkm_fb *fb = nvkm_fb(parent);
	struct nvkm_subdev *subdev = &fb->subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct gf100_ram *ram;
	int ret;

	ret = gf100_ram_create(parent, engine, oclass, 0x022554, &ram);
	*pobject = nv_object(ram);
	if (ret)
		return ret;

	ret = nvbios_pll_parse(bios, 0x0c, &ram->refpll);
	if (ret) {
		nvkm_error(subdev, "mclk refpll data not found\n");
		return ret;
	}

	ret = nvbios_pll_parse(bios, 0x04, &ram->mempll);
	if (ret) {
		nvkm_error(subdev, "mclk pll data not found\n");
		return ret;
	}

	switch (ram->base.type) {
	case NV_MEM_TYPE_GDDR5:
		ram->base.calc = gf100_ram_calc;
		ram->base.prog = gf100_ram_prog;
		ram->base.tidy = gf100_ram_tidy;
		break;
	default:
		nvkm_warn(subdev, "reclocking of this ram type unsupported\n");
		return 0;
	}

	ram->fuc.r_0x10fe20 = ramfuc_reg(0x10fe20);
	ram->fuc.r_0x10fe24 = ramfuc_reg(0x10fe24);
	ram->fuc.r_0x137320 = ramfuc_reg(0x137320);
	ram->fuc.r_0x137330 = ramfuc_reg(0x137330);

	ram->fuc.r_0x132000 = ramfuc_reg(0x132000);
	ram->fuc.r_0x132004 = ramfuc_reg(0x132004);
	ram->fuc.r_0x132100 = ramfuc_reg(0x132100);

	ram->fuc.r_0x137390 = ramfuc_reg(0x137390);

	ram->fuc.r_0x10f290 = ramfuc_reg(0x10f290);
	ram->fuc.r_0x10f294 = ramfuc_reg(0x10f294);
	ram->fuc.r_0x10f298 = ramfuc_reg(0x10f298);
	ram->fuc.r_0x10f29c = ramfuc_reg(0x10f29c);
	ram->fuc.r_0x10f2a0 = ramfuc_reg(0x10f2a0);

	ram->fuc.r_0x10f300 = ramfuc_reg(0x10f300);
	ram->fuc.r_0x10f338 = ramfuc_reg(0x10f338);
	ram->fuc.r_0x10f340 = ramfuc_reg(0x10f340);
	ram->fuc.r_0x10f344 = ramfuc_reg(0x10f344);
	ram->fuc.r_0x10f348 = ramfuc_reg(0x10f348);

	ram->fuc.r_0x10f910 = ramfuc_reg(0x10f910);
	ram->fuc.r_0x10f914 = ramfuc_reg(0x10f914);

	ram->fuc.r_0x100b0c = ramfuc_reg(0x100b0c);
	ram->fuc.r_0x10f050 = ramfuc_reg(0x10f050);
	ram->fuc.r_0x10f090 = ramfuc_reg(0x10f090);
	ram->fuc.r_0x10f200 = ramfuc_reg(0x10f200);
	ram->fuc.r_0x10f210 = ramfuc_reg(0x10f210);
	ram->fuc.r_0x10f310 = ramfuc_reg(0x10f310);
	ram->fuc.r_0x10f314 = ramfuc_reg(0x10f314);
	ram->fuc.r_0x10f610 = ramfuc_reg(0x10f610);
	ram->fuc.r_0x10f614 = ramfuc_reg(0x10f614);
	ram->fuc.r_0x10f800 = ramfuc_reg(0x10f800);
	ram->fuc.r_0x10f808 = ramfuc_reg(0x10f808);
	ram->fuc.r_0x10f824 = ramfuc_reg(0x10f824);
	ram->fuc.r_0x10f830 = ramfuc_reg(0x10f830);
	ram->fuc.r_0x10f988 = ramfuc_reg(0x10f988);
	ram->fuc.r_0x10f98c = ramfuc_reg(0x10f98c);
	ram->fuc.r_0x10f990 = ramfuc_reg(0x10f990);
	ram->fuc.r_0x10f998 = ramfuc_reg(0x10f998);
	ram->fuc.r_0x10f9b0 = ramfuc_reg(0x10f9b0);
	ram->fuc.r_0x10f9b4 = ramfuc_reg(0x10f9b4);
	ram->fuc.r_0x10fb04 = ramfuc_reg(0x10fb04);
	ram->fuc.r_0x10fb08 = ramfuc_reg(0x10fb08);
	ram->fuc.r_0x137300 = ramfuc_reg(0x137300);
	ram->fuc.r_0x137310 = ramfuc_reg(0x137310);
	ram->fuc.r_0x137360 = ramfuc_reg(0x137360);
	ram->fuc.r_0x1373ec = ramfuc_reg(0x1373ec);
	ram->fuc.r_0x1373f0 = ramfuc_reg(0x1373f0);
	ram->fuc.r_0x1373f8 = ramfuc_reg(0x1373f8);

	ram->fuc.r_0x61c140 = ramfuc_reg(0x61c140);
	ram->fuc.r_0x611200 = ramfuc_reg(0x611200);

	ram->fuc.r_0x13d8f4 = ramfuc_reg(0x13d8f4);
	return 0;
}

struct nvkm_oclass
gf100_ram_oclass = {
	.handle = 0,
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gf100_ram_ctor,
		.dtor = _nvkm_ram_dtor,
		.init = gf100_ram_init,
		.fini = _nvkm_ram_fini,
	}
};