/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv04.h"

#include <core/client.h>
#include <core/engctx.h>
#include <core/handle.h>
#include <core/ramht.h>
#include <subdev/instmem/nv04.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/unpack.h>

/* One entry per RAMFC field: bit width (bits), shift and byte offset within
 * the channel's RAMFC slot (ctxs/ctxp), and the shift and address of the
 * CACHE1 register it shadows (regs/regp); consumed by the context save loop
 * in nv04_fifo_chan_fini().
 */
static struct ramfc_desc
nv04_ramfc[] = {
	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
	{ 16,  0, 0x08,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
	{ 16, 16, 0x08,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
	{ 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_STATE },
	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_ENGINE },
	{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_PULL1 },
	{}
};

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

int
nv04_fifo_object_attach(struct nvkm_object *parent,
			struct nvkm_object *object, u32 handle)
{
	struct nv04_fifo *fifo = (void *)parent->engine;
	struct nv04_fifo_chan *chan = (void *)parent;
	u32 context, chid = chan->base.chid;
	int ret;

	if (nv_iclass(object, NV_GPUOBJ_CLASS))
		context = nv_gpuobj(object)->addr >> 4;
	else
		context = 0x00000004; /* just non-zero */

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_DMAOBJ:
	case NVDEV_ENGINE_SW:
		context |= 0x00000000;
		break;
	case NVDEV_ENGINE_GR:
		context |= 0x00010000;
		break;
	case NVDEV_ENGINE_MPEG:
		context |= 0x00020000;
		break;
	default:
		return -EINVAL;
	}

	context |= 0x80000000; /* valid */
	context |= chid << 24;

	mutex_lock(&nv_subdev(fifo)->mutex);
	ret = nvkm_ramht_insert(fifo->ramht, chid, handle, context);
	mutex_unlock(&nv_subdev(fifo)->mutex);
	return ret;
}
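
/* A sketch of the RAMHT context word assembled above, as implied by the
 * constants ORed together (the bit positions are read off this code, not
 * taken from hardware documentation):
 *
 *    31     30...24   17..16   15...............0
 *  +-----+----------+--------+-------------------+
 *  |valid|   chid   | engine | inst. address >> 4|
 *  +-----+----------+--------+-------------------+
 *
 * engine: 0 = SW/DMAOBJ, 1 = GR, 2 = MPEG.
 */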

void
nv04_fifo_object_detach(struct nvkm_object *parent, int cookie)
{
	struct nv04_fifo *fifo = (void *)parent->engine;
	mutex_lock(&nv_subdev(fifo)->mutex);
	nvkm_ramht_remove(fifo->ramht, cookie);
	mutex_unlock(&nv_subdev(fifo)->mutex);
}

int
nv04_fifo_context_attach(struct nvkm_object *parent,
			 struct nvkm_object *object)
{
	nv_engctx(object)->addr = nvkm_fifo_chan(parent)->chid;
	return 0;
}

static int
nv04_fifo_chan_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv03_channel_dma_v0 v0;
	} *args = data;
	struct nv04_fifo *fifo = (void *)engine;
	struct nv04_fifo_chan *chan;
	int ret;

	nvif_ioctl(parent, "create channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel dma vers %d pushbuf %08x "
				   "offset %016llx\n", args->v0.version,
			   args->v0.pushbuf, args->v0.offset);
	} else
		return ret; /* 'ret' is assigned inside the nvif_unpack() macro */

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
				       0x10000, args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
	chan->ramfc = chan->base.chid * 32;

	/* seed the channel's RAMFC entry: PUT and GET both start at the
	 * requested pushbuf offset, along with the pushbuf instance and
	 * the default fetch parameters */
	nv_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
	nv_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
	nv_wo32(fifo->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
	nv_wo32(fifo->ramfc, chan->ramfc + 0x10,
		NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
		NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
		NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
		NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	return 0;
}
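
/* Layout of one 32-byte RAMFC slot, as written by the ctor above and
 * described by nv04_ramfc[]; each word shadows the named CACHE1 register:
 *
 *   0x00 DMA_PUT    0x04 DMA_GET
 *   0x08 DMA_INSTANCE (bits 15:0) | DMA_DCOUNT (bits 31:16)
 *   0x0c DMA_STATE  0x10 DMA_FETCH
 *   0x14 ENGINE     0x18 PULL1
 */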

void
nv04_fifo_chan_dtor(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct ramfc_desc *c = fifo->ramfc_desc;

	do {
		nv_wo32(fifo->ramfc, chan->ramfc + c->ctxp, 0x00000000);
	} while ((++c)->bits);

	nvkm_fifo_channel_destroy(&chan->base);
}

int
nv04_fifo_chan_init(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = 1 << chan->base.chid;
	unsigned long flags;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_mask(device, NV04_PFIFO_MODE, mask, mask);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return 0;
}

int
nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nv04_fifo *fifo = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct nvkm_gpuobj *fctx = fifo->ramfc;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct ramfc_desc *c;
	unsigned long flags;
	u32 data = chan->ramfc;
	u32 chid;

	/* prevent fifo context switches */
	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

	/* if this channel is active, replace it with a null context */
	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max;
	if (chid == chan->base.chid) {
		nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
		nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);

		/* save the current CACHE1 state back into the channel's
		 * RAMFC slot, one descriptor field at a time */
		c = fifo->ramfc_desc;
		do {
			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
			u32 rv = (nvkm_rd32(device, c->regp) & rm) >> c->regs;
			u32 cv = (nv_ro32(fctx, c->ctxp + data) & ~cm);
			nv_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
		} while ((++c)->bits);

		c = fifo->ramfc_desc;
		do {
			nvkm_wr32(device, c->regp, 0x00000000);
		} while ((++c)->bits);

		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	/* restore normal operation, after disabling dma mode */
	nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	return nvkm_fifo_channel_fini(&chan->base, suspend);
}
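
/* Worked example of the RAMFC save loop above, for the two nv04_ramfc[]
 * entries sharing offset 0x08: DMA_INSTANCE (bits = 16, ctxs = 0) and
 * DMA_DCOUNT (bits = 16, ctxs = 16) are each read from their register,
 * masked to 16 bits, and merged into a single 32-bit RAMFC word with
 * DCOUNT in the high half and INSTANCE in the low half.
 */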

static struct nvkm_ofuncs
nv04_fifo_ofuncs = {
	.ctor = nv04_fifo_chan_ctor,
	.dtor = nv04_fifo_chan_dtor,
	.init = nv04_fifo_chan_init,
	.fini = nv04_fifo_chan_fini,
	.map = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};

static struct nvkm_oclass
nv04_fifo_sclass[] = {
	{ NV03_CHANNEL_DMA, &nv04_fifo_ofuncs },
	{}
};

/*******************************************************************************
 * FIFO context - basically just the instmem reserved for the channel
 ******************************************************************************/

int
nv04_fifo_context_ctor(struct nvkm_object *parent,
		       struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, void *data, u32 size,
		       struct nvkm_object **pobject)
{
	struct nv04_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
				       0x1000, NVOBJ_FLAG_HEAP, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	return 0;
}

static struct nvkm_oclass
nv04_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x04),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_fifo_context_ctor,
		.dtor = _nvkm_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

void
nv04_fifo_pause(struct nvkm_fifo *obj, unsigned long *pflags)
__acquires(fifo->base.lock)
{
	struct nv04_fifo *fifo = container_of(obj, typeof(*fifo), base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags;

	spin_lock_irqsave(&fifo->base.lock, flags);
	*pflags = flags;

	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);

	/* in some cases the puller may be left in an inconsistent state
	 * if you try to stop it while it's busy translating handles.
	 * sometimes you get a CACHE_ERROR, sometimes it just fails
	 * silently, sending incorrect instance offsets to PGRAPH after
	 * it's started up again.
	 *
	 * to avoid this, we invalidate the most recently calculated
	 * instance.
	 */
	nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0);
		if (!(tmp & NV04_PFIFO_CACHE1_PULL0_HASH_BUSY))
			break;
	);

	if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) &
		      NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0x00000000);
}

void
nv04_fifo_start(struct nvkm_fifo *obj, unsigned long *pflags)
__releases(fifo->base.lock)
{
	struct nv04_fifo *fifo = container_of(obj, typeof(*fifo), base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags = *pflags;

	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);

	spin_unlock_irqrestore(&fifo->base.lock, flags);
}
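
/* nv04_fifo_pause() and nv04_fifo_start() above form a matched pair:
 * pause takes fifo->base.lock and halts the cache puller, start restarts
 * the puller and drops the lock, with the saved IRQ flags handed between
 * the two calls through *pflags.
 */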

static const char *
nv_dma_state_err(u32 state)
{
	static const char * const desc[] = {
		"NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
		"INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
	};
	return desc[(state >> 29) & 0x7];
}

static bool
nv04_fifo_swmthd(struct nv04_fifo *fifo, u32 chid, u32 addr, u32 data)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nv04_fifo_chan *chan = NULL;
	struct nvkm_handle *bind;
	const int subc = (addr >> 13) & 0x7;
	const int mthd = addr & 0x1ffc;
	bool handled = false;
	unsigned long flags;
	u32 engine;

	spin_lock_irqsave(&fifo->base.lock, flags);
	if (likely(chid >= fifo->base.min && chid <= fifo->base.max))
		chan = (void *)fifo->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	switch (mthd) {
	case 0x0000:
		/* an object is being bound to the subchannel; if it lives
		 * on the software engine, route the subchannel to SW */
		bind = nvkm_namedb_get(nv_namedb(chan), data);
		if (unlikely(!bind))
			break;

		if (nv_engidx(bind->object->engine) == NVDEV_ENGINE_SW) {
			engine = 0x0000000f << (subc * 4);
			chan->subc[subc] = data;
			handled = true;

			nvkm_mask(device, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
		}

		nvkm_namedb_put(bind);
		break;
	default:
		engine = nvkm_rd32(device, NV04_PFIFO_CACHE1_ENGINE);
		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
			break;

		bind = nvkm_namedb_get(nv_namedb(chan), chan->subc[subc]);
		if (likely(bind)) {
			if (!nv_call(bind->object, mthd, data))
				handled = true;
			nvkm_namedb_put(bind);
		}
		break;
	}

out:
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return handled;
}

static void
nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mthd, data;
	int ptr;

	/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my
	 * G80 chips, but CACHE1 isn't big enough for this much data.  Tests
	 * show that it wraps around to the start at GET=0x800.  No clue as
	 * to why.
	 */
	ptr = (get & 0x7ff) >> 2;

	if (device->card_type < NV_40) {
		mthd = nvkm_rd32(device, NV04_PFIFO_CACHE1_METHOD(ptr));
		data = nvkm_rd32(device, NV04_PFIFO_CACHE1_DATA(ptr));
	} else {
		mthd = nvkm_rd32(device, NV40_PFIFO_CACHE1_METHOD(ptr));
		data = nvkm_rd32(device, NV40_PFIFO_CACHE1_DATA(ptr));
	}

	if (!nv04_fifo_swmthd(fifo, chid, mthd, data)) {
		const char *client_name =
			nvkm_client_name_for_fifo_chid(&fifo->base, chid);
		nvkm_error(subdev, "CACHE_ERROR - "
			   "ch %d [%s] subc %d mthd %04x data %08x\n",
			   chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc,
			   data);
	}

	/* acknowledge the error, skip past the offending entry, and
	 * restart the pusher and puller */
	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
	nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
		  nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) & ~1);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
		  nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) | 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0);

	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH,
		  nvkm_rd32(device, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
}

static void
nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 dma_get = nvkm_rd32(device, 0x003244);
	u32 dma_put = nvkm_rd32(device, 0x003240);
	u32 push = nvkm_rd32(device, 0x003220);
	u32 state = nvkm_rd32(device, 0x003228);
	const char *client_name;

	client_name = nvkm_client_name_for_fifo_chid(&fifo->base, chid);

	if (device->card_type == NV_50) {
		u32 ho_get = nvkm_rd32(device, 0x003328);
		u32 ho_put = nvkm_rd32(device, 0x003320);
		u32 ib_get = nvkm_rd32(device, 0x003334);
		u32 ib_put = nvkm_rd32(device, 0x003330);

		nvkm_error(subdev, "DMA_PUSHER - "
			   "ch %d [%s] get %02x%08x put %02x%08x ib_get %08x "
			   "ib_put %08x state %08x (err: %s) push %08x\n",
			   chid, client_name, ho_get, dma_get, ho_put, dma_put,
			   ib_get, ib_put, state, nv_dma_state_err(state),
			   push);

		/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
		nvkm_wr32(device, 0x003364, 0x00000000);
		if (dma_get != dma_put || ho_get != ho_put) {
			nvkm_wr32(device, 0x003244, dma_put);
			nvkm_wr32(device, 0x003328, ho_put);
		} else
		if (ib_get != ib_put)
			nvkm_wr32(device, 0x003334, ib_put);
	} else {
		nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x "
				   "state %08x (err: %s) push %08x\n",
			   chid, client_name, dma_get, dma_put, state,
			   nv_dma_state_err(state), push);

		if (dma_get != dma_put)
			nvkm_wr32(device, 0x003244, dma_put);
	}

	nvkm_wr32(device, 0x003228, 0x00000000);
	nvkm_wr32(device, 0x003220, 0x00000001);
	nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
}

void
nv04_fifo_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_device *device = subdev->device;
	struct nv04_fifo *fifo = (void *)subdev;
	u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0);
	u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask;
	u32 reassign, chid, get, sem;

	reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1;
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max;
	get = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET);

	if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
		nv04_fifo_cache_error(fifo, chid, get);
		stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
	}

	if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
		nv04_fifo_dma_pusher(fifo, chid);
		stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
	}

	if (stat & NV_PFIFO_INTR_SEMAPHORE) {
		stat &= ~NV_PFIFO_INTR_SEMAPHORE;
		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);

		sem = nvkm_rd32(device, NV10_PFIFO_CACHE1_SEMAPHORE);
		nvkm_wr32(device, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	if (device->card_type == NV_50) {
		if (stat & 0x00000010) {
			stat &= ~0x00000010;
			nvkm_wr32(device, 0x002100, 0x00000010);
		}

		if (stat & 0x40000000) {
			nvkm_wr32(device, 0x002100, 0x40000000);
			nvkm_fifo_uevent(&fifo->base);
			stat &= ~0x40000000;
		}
	}

	if (stat) {
		nvkm_warn(subdev, "intr %08x\n", stat);
		nvkm_mask(device, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
		nvkm_wr32(device, NV03_PFIFO_INTR_0, stat);
	}

	nvkm_wr32(device, NV03_PFIFO_CACHES, reassign);
}

static int
nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nv04_instmem *imem = nv04_instmem(parent);
	struct nv04_fifo *fifo;
	int ret;

	ret = nvkm_fifo_create(parent, engine, oclass, 0, 15, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	nvkm_ramht_ref(imem->ramht, &fifo->ramht);
	nvkm_gpuobj_ref(imem->ramro, &fifo->ramro);
	nvkm_gpuobj_ref(imem->ramfc, &fifo->ramfc);

	nv_subdev(fifo)->unit = 0x00000100;
	nv_subdev(fifo)->intr = nv04_fifo_intr;
	nv_engine(fifo)->cclass = &nv04_fifo_cclass;
	nv_engine(fifo)->sclass = nv04_fifo_sclass;
	fifo->base.pause = nv04_fifo_pause;
	fifo->base.start = nv04_fifo_start;
	fifo->ramfc_desc = nv04_ramfc;
	return 0;
}

void
nv04_fifo_dtor(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object;
	nvkm_gpuobj_ref(NULL, &fifo->ramfc);
	nvkm_gpuobj_ref(NULL, &fifo->ramro);
	nvkm_ramht_ref(NULL, &fifo->ramht);
	nvkm_fifo_destroy(&fifo->base);
}

int
nv04_fifo_init(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

	nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
	nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);

	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
					    ((fifo->ramht->bits - 9) << 16) |
					    (fifo->ramht->gpuobj.addr >> 8));
	nvkm_wr32(device, NV03_PFIFO_RAMRO, fifo->ramro->addr >> 8);
	nvkm_wr32(device, NV03_PFIFO_RAMFC, fifo->ramfc->addr >> 8);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);

	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
	return 0;
}
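
/* The NV03_PFIFO_RAMHT write in nv04_fifo_init() packs three fields: the
 * search stride in bits 24+ (0x03 selecting the 128-byte search noted
 * inline), the hash-table size in bits 16+, and the table's base address
 * in 256-byte units in the low bits.  Reading "fifo->ramht->bits - 9" as
 * the size encoding relative to the minimum 512-entry table is an
 * interpretation of this code, not a documented register description.
 */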

struct nvkm_oclass *
nv04_fifo_oclass = &(struct nvkm_oclass) {
	.handle = NV_ENGINE(FIFO, 0x04),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_fifo_ctor,
		.dtor = nv04_fifo_dtor,
		.init = nv04_fifo_init,
		.fini = _nvkm_fifo_fini,
	},
};