/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv04.h"

#include <core/client.h>
#include <core/engctx.h>
#include <core/handle.h>
#include <core/ramht.h>
#include <subdev/instmem/nv04.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/unpack.h>

static struct ramfc_desc
nv04_ramfc[] = {
	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
	{ 16,  0, 0x08,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
	{ 16, 16, 0x08,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
	{ 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_STATE },
	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_ENGINE },
	{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_PULL1 },
	{}
};

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

int
nv04_fifo_object_attach(struct nvkm_object *parent,
			struct nvkm_object *object, u32 handle)
{
	struct nv04_fifo *fifo = (void *)parent->engine;
	struct nv04_fifo_chan *chan = (void *)parent;
	u32 context, chid = chan->base.chid;
	int ret;

	if (nv_iclass(object, NV_GPUOBJ_CLASS))
		context = nv_gpuobj(object)->addr >> 4;
	else
		context = 0x00000004; /* just non-zero */

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_DMAOBJ:
	case NVDEV_ENGINE_SW:
		context |= 0x00000000;
		break;
	case NVDEV_ENGINE_GR:
		context |= 0x00010000;
		break;
	case NVDEV_ENGINE_MPEG:
		context |= 0x00020000;
		break;
	default:
		return -EINVAL;
	}

	context |= 0x80000000; /* valid */
	context |= chid << 24;

	mutex_lock(&nv_subdev(fifo)->mutex);
	ret = nvkm_ramht_insert(fifo->ramht, chid, handle, context);
	mutex_unlock(&nv_subdev(fifo)->mutex);
	return ret;
}

void
nv04_fifo_object_detach(struct nvkm_object *parent, int cookie)
{
	struct nv04_fifo *fifo = (void *)parent->engine;
	mutex_lock(&nv_subdev(fifo)->mutex);
	nvkm_ramht_remove(fifo->ramht, cookie);
	mutex_unlock(&nv_subdev(fifo)->mutex);
}

int
nv04_fifo_context_attach(struct nvkm_object *parent,
			 struct nvkm_object *object)
{
	nv_engctx(object)->addr = nvkm_fifo_chan(parent)->chid;
	return 0;
}
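/* Channel constructor below: unpacks the NV03_CHANNEL_DMA v0 arguments,
 * creates the software channel object, and seeds the channel's 32-byte
 * RAMFC slot (offset chid * 32).  DMA_PUT and DMA_GET both start at the
 * requested pushbuf offset, so the channel comes up idle until userspace
 * advances PUT.
 */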
static int
nv04_fifo_chan_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv03_channel_dma_v0 v0;
	} *args = data;
	struct nv04_fifo *fifo = (void *)engine;
	struct nv04_fifo_chan *chan;
	int ret;

	nv_ioctl(parent, "create channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
				 "offset %016llx\n", args->v0.version,
			 args->v0.pushbuf, args->v0.offset);
	} else
		return ret;

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
				       0x10000, args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
	chan->ramfc = chan->base.chid * 32;

	nv_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
	nv_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
	nv_wo32(fifo->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
	nv_wo32(fifo->ramfc, chan->ramfc + 0x10,
			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
			     NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	return 0;
}

void
nv04_fifo_chan_dtor(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct ramfc_desc *c = fifo->ramfc_desc;

	do {
		nv_wo32(fifo->ramfc, chan->ramfc + c->ctxp, 0x00000000);
	} while ((++c)->bits);

	nvkm_fifo_channel_destroy(&chan->base);
}

int
nv04_fifo_chan_init(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = 1 << chan->base.chid;
	unsigned long flags;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_mask(device, NV04_PFIFO_MODE, mask, mask);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return 0;
}
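/* Channel teardown: if the channel being stopped is the one currently
 * resident in CACHE1, each register named in ramfc_desc is read back and
 * folded into the channel's RAMFC image before the registers are zeroed
 * and CACHE1 is pointed at an unused channel id (fifo->base.max).
 */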
int
nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nv04_fifo *fifo = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct nvkm_gpuobj *fctx = fifo->ramfc;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct ramfc_desc *c;
	unsigned long flags;
	u32 data = chan->ramfc;
	u32 chid;

	/* prevent fifo context switches */
	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

	/* if this channel is active, replace it with a null context */
	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max;
	if (chid == chan->base.chid) {
		nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
		nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);

		c = fifo->ramfc_desc;
		do {
			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
			u32 rv = (nvkm_rd32(device, c->regp) & rm) >> c->regs;
			u32 cv = (nv_ro32(fctx, c->ctxp + data) & ~cm);
			nv_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
		} while ((++c)->bits);

		c = fifo->ramfc_desc;
		do {
			nvkm_wr32(device, c->regp, 0x00000000);
		} while ((++c)->bits);

		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	/* restore normal operation, after disabling dma mode */
	nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	return nvkm_fifo_channel_fini(&chan->base, suspend);
}

static struct nvkm_ofuncs
nv04_fifo_ofuncs = {
	.ctor = nv04_fifo_chan_ctor,
	.dtor = nv04_fifo_chan_dtor,
	.init = nv04_fifo_chan_init,
	.fini = nv04_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};

static struct nvkm_oclass
nv04_fifo_sclass[] = {
	{ NV03_CHANNEL_DMA, &nv04_fifo_ofuncs },
	{}
};

/*******************************************************************************
 * FIFO context - basically just the instmem reserved for the channel
 ******************************************************************************/

int
nv04_fifo_context_ctor(struct nvkm_object *parent,
		       struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, void *data, u32 size,
		       struct nvkm_object **pobject)
{
	struct nv04_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
				       0x1000, NVOBJ_FLAG_HEAP, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	return 0;
}

static struct nvkm_oclass
nv04_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x04),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_fifo_context_ctor,
		.dtor = _nvkm_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/
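/* nv04_fifo_pause()/nv04_fifo_start() bracket critical sections that must
 * not race against PFIFO: pause takes fifo->base.lock and halts cache
 * reassignment and the puller, start undoes both and drops the lock.  The
 * saved IRQ flags travel between the two calls through *pflags.
 */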
void
nv04_fifo_pause(struct nvkm_fifo *obj, unsigned long *pflags)
__acquires(fifo->base.lock)
{
	struct nv04_fifo *fifo = container_of(obj, typeof(*fifo), base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags;

	spin_lock_irqsave(&fifo->base.lock, flags);
	*pflags = flags;

	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);

	/* in some cases the puller may be left in an inconsistent state
	 * if you try to stop it while it's busy translating handles.
	 * sometimes you get a CACHE_ERROR, sometimes it just fails
	 * silently, sending incorrect instance offsets to PGRAPH after
	 * it's started up again.
	 *
	 * to avoid this, we invalidate the most recently calculated
	 * instance.
	 */
	nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0);
		if (!(tmp & NV04_PFIFO_CACHE1_PULL0_HASH_BUSY))
			break;
	);

	if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) &
		      NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0x00000000);
}

void
nv04_fifo_start(struct nvkm_fifo *obj, unsigned long *pflags)
__releases(fifo->base.lock)
{
	struct nv04_fifo *fifo = container_of(obj, typeof(*fifo), base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags = *pflags;

	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);

	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static const char *
nv_dma_state_err(u32 state)
{
	static const char * const desc[] = {
		"NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
		"INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
	};
	return desc[(state >> 29) & 0x7];
}

static bool
nv04_fifo_swmthd(struct nv04_fifo *fifo, u32 chid, u32 addr, u32 data)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nv04_fifo_chan *chan = NULL;
	struct nvkm_handle *bind;
	const int subc = (addr >> 13) & 0x7;
	const int mthd = addr & 0x1ffc;
	bool handled = false;
	unsigned long flags;
	u32 engine;

	spin_lock_irqsave(&fifo->base.lock, flags);
	if (likely(chid >= fifo->base.min && chid <= fifo->base.max))
		chan = (void *)fifo->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	switch (mthd) {
	case 0x0000:
		bind = nvkm_namedb_get(nv_namedb(chan), data);
		if (unlikely(!bind))
			break;

		if (nv_engidx(bind->object->engine) == NVDEV_ENGINE_SW) {
			engine = 0x0000000f << (subc * 4);
			chan->subc[subc] = data;
			handled = true;

			nvkm_mask(device, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
		}

		nvkm_namedb_put(bind);
		break;
	default:
		engine = nvkm_rd32(device, NV04_PFIFO_CACHE1_ENGINE);
		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
			break;

		bind = nvkm_namedb_get(nv_namedb(chan), chan->subc[subc]);
		if (likely(bind)) {
			if (!nv_call(bind->object, mthd, data))
				handled = true;
			nvkm_namedb_put(bind);
		}
		break;
	}

out:
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return handled;
}
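/* CACHE_ERROR handling: the faulting method is first offered to the
 * software engine via nv04_fifo_swmthd() in case it can be emulated;
 * only if that fails is it reported as an error.  Either way, CACHE1_GET
 * is advanced past the offending entry so the puller can make progress.
 */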
static void
nv04_fifo_cache_error(struct nvkm_device *device,
		      struct nv04_fifo *fifo, u32 chid, u32 get)
{
	u32 mthd, data;
	int ptr;

	/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my
	 * G80 chips, but CACHE1 isn't big enough for this much data.. Tests
	 * show that it wraps around to the start at GET=0x800.. No clue as to
	 * why..
	 */
	ptr = (get & 0x7ff) >> 2;

	if (device->card_type < NV_40) {
		mthd = nvkm_rd32(device, NV04_PFIFO_CACHE1_METHOD(ptr));
		data = nvkm_rd32(device, NV04_PFIFO_CACHE1_DATA(ptr));
	} else {
		mthd = nvkm_rd32(device, NV40_PFIFO_CACHE1_METHOD(ptr));
		data = nvkm_rd32(device, NV40_PFIFO_CACHE1_DATA(ptr));
	}

	if (!nv04_fifo_swmthd(fifo, chid, mthd, data)) {
		const char *client_name =
			nvkm_client_name_for_fifo_chid(&fifo->base, chid);
		nv_error(fifo,
			 "CACHE_ERROR - ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
			 chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc,
			 data);
	}

	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
	nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) & ~1);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) | 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0);

	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH,
		nvkm_rd32(device, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
}

static void
nv04_fifo_dma_pusher(struct nvkm_device *device,
		     struct nv04_fifo *fifo, u32 chid)
{
	const char *client_name;
	u32 dma_get = nvkm_rd32(device, 0x003244);
	u32 dma_put = nvkm_rd32(device, 0x003240);
	u32 push = nvkm_rd32(device, 0x003220);
	u32 state = nvkm_rd32(device, 0x003228);

	client_name = nvkm_client_name_for_fifo_chid(&fifo->base, chid);

	if (device->card_type == NV_50) {
		u32 ho_get = nvkm_rd32(device, 0x003328);
		u32 ho_put = nvkm_rd32(device, 0x003320);
		u32 ib_get = nvkm_rd32(device, 0x003334);
		u32 ib_put = nvkm_rd32(device, 0x003330);

		nv_error(fifo,
			 "DMA_PUSHER - ch %d [%s] get 0x%02x%08x put 0x%02x%08x ib_get 0x%08x ib_put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
			 chid, client_name, ho_get, dma_get, ho_put, dma_put,
			 ib_get, ib_put, state, nv_dma_state_err(state), push);

		/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
		nvkm_wr32(device, 0x003364, 0x00000000);
		if (dma_get != dma_put || ho_get != ho_put) {
			nvkm_wr32(device, 0x003244, dma_put);
			nvkm_wr32(device, 0x003328, ho_put);
		} else
		if (ib_get != ib_put)
			nvkm_wr32(device, 0x003334, ib_put);
	} else {
		nv_error(fifo,
			 "DMA_PUSHER - ch %d [%s] get 0x%08x put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
			 chid, client_name, dma_get, dma_put, state,
			 nv_dma_state_err(state), push);

		if (dma_get != dma_put)
			nvkm_wr32(device, 0x003244, dma_put);
	}

	nvkm_wr32(device, 0x003228, 0x00000000);
	nvkm_wr32(device, 0x003220, 0x00000001);
	nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
}
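/* Top-level PFIFO interrupt handler: cache reassignment is suspended while
 * servicing, CACHE_ERROR and DMA_PUSHER are delegated to the helpers above,
 * semaphore and (on NV50) non-stall/uevent interrupts are acknowledged
 * inline, and anything unrecognised is masked off to avoid an intr storm.
 */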
void
nv04_fifo_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_device *device = nv_device(subdev);
	struct nv04_fifo *fifo = (void *)subdev;
	u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0);
	u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask;
	u32 reassign, chid, get, sem;

	reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1;
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max;
	get = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET);

	if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
		nv04_fifo_cache_error(device, fifo, chid, get);
		stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
	}

	if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
		nv04_fifo_dma_pusher(device, fifo, chid);
		stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
	}

	if (stat & NV_PFIFO_INTR_SEMAPHORE) {
		stat &= ~NV_PFIFO_INTR_SEMAPHORE;
		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);

		sem = nvkm_rd32(device, NV10_PFIFO_CACHE1_SEMAPHORE);
		nvkm_wr32(device, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	if (device->card_type == NV_50) {
		if (stat & 0x00000010) {
			stat &= ~0x00000010;
			nvkm_wr32(device, 0x002100, 0x00000010);
		}

		if (stat & 0x40000000) {
			nvkm_wr32(device, 0x002100, 0x40000000);
			nvkm_fifo_uevent(&fifo->base);
			stat &= ~0x40000000;
		}
	}

	if (stat) {
		nv_warn(fifo, "unknown intr 0x%08x\n", stat);
		nvkm_mask(device, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
		nvkm_wr32(device, NV03_PFIFO_INTR_0, stat);
	}

	nvkm_wr32(device, NV03_PFIFO_CACHES, reassign);
}

static int
nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nv04_instmem *imem = nv04_instmem(parent);
	struct nv04_fifo *fifo;
	int ret;

	ret = nvkm_fifo_create(parent, engine, oclass, 0, 15, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	nvkm_ramht_ref(imem->ramht, &fifo->ramht);
	nvkm_gpuobj_ref(imem->ramro, &fifo->ramro);
	nvkm_gpuobj_ref(imem->ramfc, &fifo->ramfc);

	nv_subdev(fifo)->unit = 0x00000100;
	nv_subdev(fifo)->intr = nv04_fifo_intr;
	nv_engine(fifo)->cclass = &nv04_fifo_cclass;
	nv_engine(fifo)->sclass = nv04_fifo_sclass;
	fifo->base.pause = nv04_fifo_pause;
	fifo->base.start = nv04_fifo_start;
	fifo->ramfc_desc = nv04_ramfc;
	return 0;
}

void
nv04_fifo_dtor(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object;
	nvkm_gpuobj_ref(NULL, &fifo->ramfc);
	nvkm_gpuobj_ref(NULL, &fifo->ramro);
	nvkm_ramht_ref(NULL, &fifo->ramht);
	nvkm_fifo_destroy(&fifo->base);
}

int
nv04_fifo_init(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

	nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
	nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);

	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
				       ((fifo->ramht->bits - 9) << 16) |
					(fifo->ramht->gpuobj.addr >> 8));
	nvkm_wr32(device, NV03_PFIFO_RAMRO, fifo->ramro->addr >> 8);
	nvkm_wr32(device, NV03_PFIFO_RAMFC, fifo->ramfc->addr >> 8);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);

	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
	return 0;
}
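/* Engine class exported for NV04-style PFIFO; presumably referenced by the
 * per-chipset device tables to instantiate this engine.
 */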
struct nvkm_oclass *
nv04_fifo_oclass = &(struct nvkm_oclass) {
	.handle = NV_ENGINE(FIFO, 0x04),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_fifo_ctor,
		.dtor = nv04_fifo_dtor,
		.init = nv04_fifo_init,
		.fini = _nvkm_fifo_fini,
	},
};