/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv40.h"
#include "regs.h"

#include <core/client.h>
#include <core/handle.h>
#include <subdev/fb.h>
#include <subdev/timer.h>
#include <engine/fifo.h>

struct nv40_gr {
	struct nvkm_gr base;
	u32 size;
};

struct nv40_gr_chan {
	struct nvkm_gr_chan base;
};

static u64
nv40_gr_units(struct nvkm_gr *gr)
{
	return nvkm_rd32(gr->engine.subdev.device, 0x1540);
}

/*******************************************************************************
 * Graphics object classes
 ******************************************************************************/

static int
nv40_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	struct nvkm_gpuobj *obj;
	int ret;

	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
				 20, 16, 0, &obj);
	*pobject = nv_object(obj);
	if (ret)
		return ret;

	nv_wo32(obj, 0x00, nv_mclass(obj));
	nv_wo32(obj, 0x04, 0x00000000);
	nv_wo32(obj, 0x08, 0x00000000);
#ifdef __BIG_ENDIAN
	nv_mo32(obj, 0x08, 0x01000000, 0x01000000);
#endif
	nv_wo32(obj, 0x0c, 0x00000000);
	nv_wo32(obj, 0x10, 0x00000000);
	return 0;
}

static struct nvkm_ofuncs
nv40_gr_ofuncs = {
	.ctor = nv40_gr_object_ctor,
	.dtor = _nvkm_gpuobj_dtor,
	.init = _nvkm_gpuobj_init,
	.fini = _nvkm_gpuobj_fini,
	.rd32 = _nvkm_gpuobj_rd32,
	.wr32 = _nvkm_gpuobj_wr32,
};

static struct nvkm_oclass
nv40_gr_sclass[] = {
	{ 0x0012, &nv40_gr_ofuncs, NULL }, /* beta1 */
	{ 0x0019, &nv40_gr_ofuncs, NULL }, /* clip */
	{ 0x0030, &nv40_gr_ofuncs, NULL }, /* null */
	{ 0x0039, &nv40_gr_ofuncs, NULL }, /* m2mf */
	{ 0x0043, &nv40_gr_ofuncs, NULL }, /* rop */
	{ 0x0044, &nv40_gr_ofuncs, NULL }, /* patt */
	{ 0x004a, &nv40_gr_ofuncs, NULL }, /* gdi */
	{ 0x0062, &nv40_gr_ofuncs, NULL }, /* surf2d */
	{ 0x0072, &nv40_gr_ofuncs, NULL }, /* beta4 */
	{ 0x0089, &nv40_gr_ofuncs, NULL }, /* sifm */
	{ 0x008a, &nv40_gr_ofuncs, NULL }, /* ifc */
	{ 0x009f, &nv40_gr_ofuncs, NULL }, /* imageblit */
	{ 0x3062, &nv40_gr_ofuncs, NULL }, /* surf2d (nv40) */
	{ 0x3089, &nv40_gr_ofuncs, NULL }, /* sifm (nv40) */
	{ 0x309e, &nv40_gr_ofuncs, NULL }, /* swzsurf (nv40) */
	{ 0x4097, &nv40_gr_ofuncs, NULL }, /* curie */
	{},
};

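/* The NV44-family table below differs from nv40_gr_sclass only in its 3D
 * ("curie") class: 0x4497 instead of 0x4097.  All 2D classes are shared.
 */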
static struct nvkm_oclass
nv44_gr_sclass[] = {
	{ 0x0012, &nv40_gr_ofuncs, NULL }, /* beta1 */
	{ 0x0019, &nv40_gr_ofuncs, NULL }, /* clip */
	{ 0x0030, &nv40_gr_ofuncs, NULL }, /* null */
	{ 0x0039, &nv40_gr_ofuncs, NULL }, /* m2mf */
	{ 0x0043, &nv40_gr_ofuncs, NULL }, /* rop */
	{ 0x0044, &nv40_gr_ofuncs, NULL }, /* patt */
	{ 0x004a, &nv40_gr_ofuncs, NULL }, /* gdi */
	{ 0x0062, &nv40_gr_ofuncs, NULL }, /* surf2d */
	{ 0x0072, &nv40_gr_ofuncs, NULL }, /* beta4 */
	{ 0x0089, &nv40_gr_ofuncs, NULL }, /* sifm */
	{ 0x008a, &nv40_gr_ofuncs, NULL }, /* ifc */
	{ 0x009f, &nv40_gr_ofuncs, NULL }, /* imageblit */
	{ 0x3062, &nv40_gr_ofuncs, NULL }, /* surf2d (nv40) */
	{ 0x3089, &nv40_gr_ofuncs, NULL }, /* sifm (nv40) */
	{ 0x309e, &nv40_gr_ofuncs, NULL }, /* swzsurf (nv40) */
	{ 0x4497, &nv40_gr_ofuncs, NULL }, /* curie */
	{},
};

/*******************************************************************************
 * PGRAPH context
 ******************************************************************************/

static int
nv40_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		     struct nvkm_oclass *oclass, void *data, u32 size,
		     struct nvkm_object **pobject)
{
	struct nv40_gr *gr = (void *)engine;
	struct nv40_gr_chan *chan;
	int ret;

	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, gr->size,
				     16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	nv40_grctx_fill(nv_device(gr), nv_gpuobj(chan));
	nv_wo32(chan, 0x00000, nv_gpuobj(chan)->addr >> 4);
	return 0;
}

static int
nv40_gr_context_fini(struct nvkm_object *object, bool suspend)
{
	struct nv40_gr *gr = (void *)object->engine;
	struct nv40_gr_chan *chan = (void *)object;
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4;
	int ret = 0;

	nvkm_mask(device, 0x400720, 0x00000001, 0x00000000);

	if (nvkm_rd32(device, 0x40032c) == inst) {
		if (suspend) {
			nvkm_wr32(device, 0x400720, 0x00000000);
			nvkm_wr32(device, 0x400784, inst);
			nvkm_mask(device, 0x400310, 0x00000020, 0x00000020);
			nvkm_mask(device, 0x400304, 0x00000001, 0x00000001);
			if (nvkm_msec(device, 2000,
				if (!(nvkm_rd32(device, 0x400300) & 0x00000001))
					break;
			) < 0) {
				u32 insn = nvkm_rd32(device, 0x400308);
				nvkm_warn(subdev, "ctxprog timeout %08x\n", insn);
				ret = -EBUSY;
			}
		}

		nvkm_mask(device, 0x40032c, 0x01000000, 0x00000000);
	}

	if (nvkm_rd32(device, 0x400330) == inst)
		nvkm_mask(device, 0x400330, 0x01000000, 0x00000000);

	nvkm_mask(device, 0x400720, 0x00000001, 0x00000001);
	return ret;
}

static struct nvkm_oclass
nv40_gr_cclass = {
	.handle = NV_ENGCTX(GR, 0x40),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv40_gr_context_ctor,
		.dtor = _nvkm_gr_context_dtor,
		.init = _nvkm_gr_context_init,
		.fini = nv40_gr_context_fini,
		.rd32 = _nvkm_gr_context_rd32,
		.wr32 = _nvkm_gr_context_wr32,
	},
};

/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/

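/* Mirror FB tile region 'i' into PGRAPH's tiling registers.  The FIFO is
 * paused and PGRAPH drained first so the registers aren't rewritten while
 * the engine is busy; which register set exists depends on the chipset.
 */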
static void
nv40_gr_tile_prog(struct nvkm_engine *engine, int i)
{
	struct nv40_gr *gr = (void *)engine;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	struct nvkm_fifo *fifo = device->fifo;
	struct nvkm_fb_tile *tile = &device->fb->tile.region[i];
	unsigned long flags;

	fifo->pause(fifo, &flags);
	nv04_gr_idle(gr);

	switch (nv_device(gr)->chipset) {
	case 0x40:
	case 0x41:
	case 0x42:
	case 0x43:
	case 0x45:
	case 0x4e:
		nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
		nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
		nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
		nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
		nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
		nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
		switch (nv_device(gr)->chipset) {
		case 0x40:
		case 0x45:
			nvkm_wr32(device, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
			nvkm_wr32(device, NV40_PGRAPH_ZCOMP1(i), tile->zcomp);
			break;
		case 0x41:
		case 0x42:
		case 0x43:
			nvkm_wr32(device, NV41_PGRAPH_ZCOMP0(i), tile->zcomp);
			nvkm_wr32(device, NV41_PGRAPH_ZCOMP1(i), tile->zcomp);
			break;
		default:
			break;
		}
		break;
	case 0x44:
	case 0x4a:
		nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
		nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
		nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
		break;
	case 0x46:
	case 0x4c:
	case 0x47:
	case 0x49:
	case 0x4b:
	case 0x63:
	case 0x67:
	case 0x68:
		nvkm_wr32(device, NV47_PGRAPH_TSIZE(i), tile->pitch);
		nvkm_wr32(device, NV47_PGRAPH_TLIMIT(i), tile->limit);
		nvkm_wr32(device, NV47_PGRAPH_TILE(i), tile->addr);
		nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
		nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
		nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
		switch (nv_device(gr)->chipset) {
		case 0x47:
		case 0x49:
		case 0x4b:
			nvkm_wr32(device, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
			nvkm_wr32(device, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	fifo->start(fifo, &flags);
}

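/* PGRAPH interrupt handler.  NV04_PGRAPH_TRAPPED_ADDR encodes the faulting
 * subchannel and method; an ILLEGAL_MTHD error is first offered to software
 * via nv_call() so per-class software methods can consume it before it gets
 * reported as an error.
 */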
static void
nv40_gr_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_fifo *fifo = nvkm_fifo(subdev);
	struct nvkm_engine *engine = nv_engine(subdev);
	struct nvkm_object *engctx;
	struct nvkm_handle *handle = NULL;
	struct nv40_gr *gr = (void *)subdev;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
	u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
	u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
	u32 inst = nvkm_rd32(device, 0x40032c) & 0x000fffff;
	u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00001ffc);
	u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
	u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xffff;
	u32 show = stat;
	char msg[128], src[128], sta[128];
	int chid;

	engctx = nvkm_engctx_get(engine, inst);
	chid = fifo->chid(fifo, engctx);

	if (stat & NV_PGRAPH_INTR_ERROR) {
		if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
			handle = nvkm_handle_get_class(engctx, class);
			if (handle && !nv_call(handle->object, mthd, data))
				show &= ~NV_PGRAPH_INTR_ERROR;
			nvkm_handle_put(handle);
		}

		if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
			nvkm_mask(device, 0x402000, 0, 0);
		}
	}

	nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
	nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), nv10_gr_intr_name, show);
		nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
		nvkm_snprintbf(sta, sizeof(sta), nv10_gr_nstatus, nstatus);
		nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
				   "nstatus %08x [%s] ch %d [%08x %s] subc %d "
				   "class %04x mthd %04x data %08x\n",
			   show, msg, nsource, src, nstatus, sta, chid,
			   inst << 4, nvkm_client_name(engctx), subc,
			   class, mthd, data);
	}

	nvkm_engctx_put(engctx);
}

static int
nv40_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	     struct nvkm_oclass *oclass, void *data, u32 size,
	     struct nvkm_object **pobject)
{
	struct nv40_gr *gr;
	int ret;

	ret = nvkm_gr_create(parent, engine, oclass, true, &gr);
	*pobject = nv_object(gr);
	if (ret)
		return ret;

	nv_subdev(gr)->unit = 0x00001000;
	nv_subdev(gr)->intr = nv40_gr_intr;
	nv_engine(gr)->cclass = &nv40_gr_cclass;
	if (nv44_gr_class(gr))
		nv_engine(gr)->sclass = nv44_gr_sclass;
	else
		nv_engine(gr)->sclass = nv40_gr_sclass;
	nv_engine(gr)->tile_prog = nv40_gr_tile_prog;

	gr->base.units = nv40_gr_units;
	return 0;
}

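/* Engine init: build and upload the context program (nv40_grctx_init),
 * clear the current-context pointer, reset interrupt/debug state, apply
 * per-chipset tweaks, disable all tiling regions, and mirror the FB RAM
 * configuration (0x100200/0x100204) and VRAM aperture size into PGRAPH.
 */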
static int
nv40_gr_init(struct nvkm_object *object)
{
	struct nvkm_engine *engine = nv_engine(object);
	struct nv40_gr *gr = (void *)engine;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	struct nvkm_fb *fb = device->fb;
	int ret, i, j;
	u32 vramsz;

	ret = nvkm_gr_init(&gr->base);
	if (ret)
		return ret;

	/* generate and upload context program */
	ret = nv40_grctx_init(nv_device(gr), &gr->size);
	if (ret)
		return ret;

	/* No context present currently */
	nvkm_wr32(device, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);

	nvkm_wr32(device, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
	nvkm_wr32(device, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x401287c0);
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
	nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00008000);
	nvkm_wr32(device, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);

	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
	nvkm_wr32(device, NV10_PGRAPH_STATE      , 0xFFFFFFFF);

	j = nvkm_rd32(device, 0x1540) & 0xff;
	if (j) {
		for (i = 0; !(j & 1); j >>= 1)
			i++;
		nvkm_wr32(device, 0x405000, i);
	}

	if (nv_device(gr)->chipset == 0x40) {
		nvkm_wr32(device, 0x4009b0, 0x83280fff);
		nvkm_wr32(device, 0x4009b4, 0x000000a0);
	} else {
		nvkm_wr32(device, 0x400820, 0x83280eff);
		nvkm_wr32(device, 0x400824, 0x000000a0);
	}

	switch (nv_device(gr)->chipset) {
	case 0x40:
	case 0x45:
		nvkm_wr32(device, 0x4009b8, 0x0078e366);
		nvkm_wr32(device, 0x4009bc, 0x0000014c);
		break;
	case 0x41:
	case 0x42: /* pciid also 0x00Cx */
	/* case 0x0120: XXX (pciid) */
		nvkm_wr32(device, 0x400828, 0x007596ff);
		nvkm_wr32(device, 0x40082c, 0x00000108);
		break;
	case 0x43:
		nvkm_wr32(device, 0x400828, 0x0072cb77);
		nvkm_wr32(device, 0x40082c, 0x00000108);
		break;
	case 0x44:
	case 0x46: /* G72 */
	case 0x4a:
	case 0x4c: /* G7x-based C51 */
	case 0x4e:
		nvkm_wr32(device, 0x400860, 0);
		nvkm_wr32(device, 0x400864, 0);
		break;
	case 0x47: /* G70 */
	case 0x49: /* G71 */
	case 0x4b: /* G73 */
		nvkm_wr32(device, 0x400828, 0x07830610);
		nvkm_wr32(device, 0x40082c, 0x0000016A);
		break;
	default:
		break;
	}

	nvkm_wr32(device, 0x400b38, 0x2ffff800);
	nvkm_wr32(device, 0x400b3c, 0x00006000);

	/* Tiling related stuff. */
	switch (nv_device(gr)->chipset) {
	case 0x44:
	case 0x4a:
		nvkm_wr32(device, 0x400bc4, 0x1003d888);
		nvkm_wr32(device, 0x400bbc, 0xb7a7b500);
		break;
	case 0x46:
		nvkm_wr32(device, 0x400bc4, 0x0000e024);
		nvkm_wr32(device, 0x400bbc, 0xb7a7b520);
		break;
	case 0x4c:
	case 0x4e:
	case 0x67:
		nvkm_wr32(device, 0x400bc4, 0x1003d888);
		nvkm_wr32(device, 0x400bbc, 0xb7a7b540);
		break;
	default:
		break;
	}

	/* Turn all the tiling regions off. */
	for (i = 0; i < fb->tile.regions; i++)
		engine->tile_prog(engine, i);

	/* begin RAM config */
	vramsz = nv_device_resource_len(nv_device(gr), 1) - 1;
	switch (nv_device(gr)->chipset) {
	case 0x40:
		nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
		nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
		nvkm_wr32(device, 0x4069A4, nvkm_rd32(device, 0x100200));
		nvkm_wr32(device, 0x4069A8, nvkm_rd32(device, 0x100204));
		nvkm_wr32(device, 0x400820, 0);
		nvkm_wr32(device, 0x400824, 0);
		nvkm_wr32(device, 0x400864, vramsz);
		nvkm_wr32(device, 0x400868, vramsz);
		break;
	default:
		switch (nv_device(gr)->chipset) {
		case 0x41:
		case 0x42:
		case 0x43:
		case 0x45:
		case 0x4e:
		case 0x44:
		case 0x4a:
			nvkm_wr32(device, 0x4009F0, nvkm_rd32(device, 0x100200));
			nvkm_wr32(device, 0x4009F4, nvkm_rd32(device, 0x100204));
			break;
		default:
			nvkm_wr32(device, 0x400DF0, nvkm_rd32(device, 0x100200));
			nvkm_wr32(device, 0x400DF4, nvkm_rd32(device, 0x100204));
			break;
		}
		nvkm_wr32(device, 0x4069F0, nvkm_rd32(device, 0x100200));
		nvkm_wr32(device, 0x4069F4, nvkm_rd32(device, 0x100204));
		nvkm_wr32(device, 0x400840, 0);
		nvkm_wr32(device, 0x400844, 0);
		nvkm_wr32(device, 0x4008A0, vramsz);
		nvkm_wr32(device, 0x4008A4, vramsz);
		break;
	}

	return 0;
}

struct nvkm_oclass
nv40_gr_oclass = {
	.handle = NV_ENGINE(GR, 0x40),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv40_gr_ctor,
		.dtor = _nvkm_gr_dtor,
		.init = nv40_gr_init,
		.fini = _nvkm_gr_fini,
	},
};
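
/* nv40_gr_oclass is deliberately non-static: the per-chipset device tables
 * select it as the GR engine class for NV40-family boards (presumably from
 * engine/device/nv40.c in this tree; exact location is an assumption).
 */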