/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv04.h"

#include <core/client.h>
#include <core/device.h>
#include <core/engctx.h>
#include <core/handle.h>
#include <core/ramht.h>
#include <subdev/instmem/nv04.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/unpack.h>

static struct ramfc_desc
nv04_ramfc[] = {
	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
	{ 16,  0, 0x08,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
	{ 16, 16, 0x08,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
	{ 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_STATE },
	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_ENGINE },
	{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_PULL1 },
	{}
};

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

int
nv04_fifo_object_attach(struct nvkm_object *parent,
			struct nvkm_object *object, u32 handle)
{
	struct nv04_fifo_priv *priv = (void *)parent->engine;
	struct nv04_fifo_chan *chan = (void *)parent;
	u32 context, chid = chan->base.chid;
	int ret;

	if (nv_iclass(object, NV_GPUOBJ_CLASS))
		context = nv_gpuobj(object)->addr >> 4;
	else
		context = 0x00000004; /* just non-zero */

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_DMAOBJ:
	case NVDEV_ENGINE_SW:
		context |= 0x00000000;
		break;
	case NVDEV_ENGINE_GR:
		context |= 0x00010000;
		break;
	case NVDEV_ENGINE_MPEG:
		context |= 0x00020000;
		break;
	default:
		return -EINVAL;
	}

	context |= 0x80000000; /* valid */
	context |= chid << 24;

	mutex_lock(&nv_subdev(priv)->mutex);
	ret = nvkm_ramht_insert(priv->ramht, chid, handle, context);
	mutex_unlock(&nv_subdev(priv)->mutex);
	return ret;
}
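/* The context word inserted above packs, per the code in
 * nv04_fifo_object_attach():
 *
 *   bit  31     entry valid (0x80000000)
 *   bits 24+    owning channel id (chid << 24)
 *   bits 17:16  engine: 0 SW/DMAOBJ, 1 GR, 2 MPEG
 *   bits 15:0   object instance >> 4 (the 16-bit width is an
 *               assumption; non-gpuobjs use the non-zero dummy 0x4)
 *
 * A minimal sketch of the composition, with hypothetical names:
 *
 *   context = (inst >> 4) | (eng << 16) | (chid << 24) | 0x80000000;
 */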
void
nv04_fifo_object_detach(struct nvkm_object *parent, int cookie)
{
	struct nv04_fifo_priv *priv = (void *)parent->engine;
	mutex_lock(&nv_subdev(priv)->mutex);
	nvkm_ramht_remove(priv->ramht, cookie);
	mutex_unlock(&nv_subdev(priv)->mutex);
}

int
nv04_fifo_context_attach(struct nvkm_object *parent,
			 struct nvkm_object *object)
{
	nv_engctx(object)->addr = nvkm_fifo_chan(parent)->chid;
	return 0;
}

static int
nv04_fifo_chan_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv03_channel_dma_v0 v0;
	} *args = data;
	struct nv04_fifo_priv *priv = (void *)engine;
	struct nv04_fifo_chan *chan;
	int ret;

	nv_ioctl(parent, "create channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
				 "offset %016llx\n", args->v0.version,
			 args->v0.pushbuf, args->v0.offset);
	} else
		return ret;

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
				       0x10000, args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
	chan->ramfc = chan->base.chid * 32;

	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
	nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
	nv_wo32(priv->ramfc, chan->ramfc + 0x10,
		NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
		NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
		NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
		NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	return 0;
}
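/* Each channel owns a 32-byte RAMFC slot at chid * 32, as set above.
 * The offsets the constructor initialises line up with the nv04_ramfc[]
 * descriptor table at the top of this file; as a sketch:
 *
 *   0x00  DMA_PUT       pushbuf offset
 *   0x04  DMA_GET       same offset, so the channel starts idle
 *   0x08  DMA_INSTANCE  pushbuf instance >> 4 in the low 16 bits,
 *                       DMA_DCOUNT in the high 16 bits
 *   0x0c  DMA_STATE
 *   0x10  DMA_FETCH     trigger/size/max-requests (+ big-endian flag)
 *   0x14  ENGINE
 *   0x18  PULL1
 *
 * The remaining words are not initialised by the constructor.
 */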
void
nv04_fifo_chan_dtor(struct nvkm_object *object)
{
	struct nv04_fifo_priv *priv = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct ramfc_desc *c = priv->ramfc_desc;

	do {
		nv_wo32(priv->ramfc, chan->ramfc + c->ctxp, 0x00000000);
	} while ((++c)->bits);

	nvkm_fifo_channel_destroy(&chan->base);
}

int
nv04_fifo_chan_init(struct nvkm_object *object)
{
	struct nv04_fifo_priv *priv = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	u32 mask = 1 << chan->base.chid;
	unsigned long flags;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	spin_lock_irqsave(&priv->base.lock, flags);
	nv_mask(priv, NV04_PFIFO_MODE, mask, mask);
	spin_unlock_irqrestore(&priv->base.lock, flags);
	return 0;
}

int
nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nv04_fifo_priv *priv = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct nvkm_gpuobj *fctx = priv->ramfc;
	struct ramfc_desc *c;
	unsigned long flags;
	u32 data = chan->ramfc;
	u32 chid;

	/* prevent fifo context switches */
	spin_lock_irqsave(&priv->base.lock, flags);
	nv_wr32(priv, NV03_PFIFO_CACHES, 0);

	/* if this channel is active, replace it with a null context */
	chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
	if (chid == chan->base.chid) {
		nv_mask(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 0);
		nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);

		c = priv->ramfc_desc;
		do {
			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
			u32 rv = (nv_rd32(priv, c->regp) & rm) >> c->regs;
			u32 cv = (nv_ro32(fctx, c->ctxp + data) & ~cm);
			nv_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
		} while ((++c)->bits);

		c = priv->ramfc_desc;
		do {
			nv_wr32(priv, c->regp, 0x00000000);
		} while ((++c)->bits);

		nv_wr32(priv, NV03_PFIFO_CACHE1_GET, 0);
		nv_wr32(priv, NV03_PFIFO_CACHE1_PUT, 0);
		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
		nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	/* restore normal operation, after disabling dma mode */
	nv_mask(priv, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
	spin_unlock_irqrestore(&priv->base.lock, flags);

	return nvkm_fifo_channel_fini(&chan->base, suspend);
}
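/* The register->RAMFC save above is driven entirely by the ramfc_desc
 * table: each entry names a c->bits wide field, read from register
 * c->regp at bit offset c->regs and merged into RAMFC word c->ctxp at
 * bit offset c->ctxs.  One iteration, sketched with plain names:
 *
 *   reg_mask = ((1ULL << bits) - 1) << regs;
 *   ctx_mask = ((1ULL << bits) - 1) << ctxs;
 *   field    = (rd32(regp) & reg_mask) >> regs;
 *   word     = (ro32(ctxp) & ~ctx_mask) | (field << ctxs);
 *
 * which is how DMA_INSTANCE and DMA_DCOUNT can share RAMFC word 0x08
 * at bit offsets 0 and 16.
 */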
static struct nvkm_ofuncs
nv04_fifo_ofuncs = {
	.ctor = nv04_fifo_chan_ctor,
	.dtor = nv04_fifo_chan_dtor,
	.init = nv04_fifo_chan_init,
	.fini = nv04_fifo_chan_fini,
	.map = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};

static struct nvkm_oclass
nv04_fifo_sclass[] = {
	{ NV03_CHANNEL_DMA, &nv04_fifo_ofuncs },
	{}
};

/*******************************************************************************
 * FIFO context - basically just the instmem reserved for the channel
 ******************************************************************************/

int
nv04_fifo_context_ctor(struct nvkm_object *parent,
		       struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, void *data, u32 size,
		       struct nvkm_object **pobject)
{
	struct nv04_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
				       0x1000, NVOBJ_FLAG_HEAP, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	return 0;
}

static struct nvkm_oclass
nv04_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x04),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_fifo_context_ctor,
		.dtor = _nvkm_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

void
nv04_fifo_pause(struct nvkm_fifo *pfifo, unsigned long *pflags)
__acquires(priv->base.lock)
{
	struct nv04_fifo_priv *priv = (void *)pfifo;
	unsigned long flags;

	spin_lock_irqsave(&priv->base.lock, flags);
	*pflags = flags;

	nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000000);
	nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);

	/* in some cases the puller may be left in an inconsistent state
	 * if you try to stop it while it's busy translating handles.
	 * sometimes you get a CACHE_ERROR, sometimes it just fails
	 * silently; sending incorrect instance offsets to PGRAPH after
	 * it's started up again.
	 *
	 * to avoid this, we invalidate the most recently calculated
	 * instance.
	 */
	if (!nv_wait(priv, NV04_PFIFO_CACHE1_PULL0,
			   NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0x00000000))
		nv_warn(priv, "timeout idling puller\n");

	if (nv_rd32(priv, NV04_PFIFO_CACHE1_PULL0) &
			  NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
		nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0x00000000);
}

void
nv04_fifo_start(struct nvkm_fifo *pfifo, unsigned long *pflags)
__releases(priv->base.lock)
{
	struct nv04_fifo_priv *priv = (void *)pfifo;
	unsigned long flags = *pflags;

	nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
	nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000001);

	spin_unlock_irqrestore(&priv->base.lock, flags);
}

static const char *
nv_dma_state_err(u32 state)
{
	static const char * const desc[] = {
		"NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
		"INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
	};
	return desc[(state >> 29) & 0x7];
}
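/* nv_dma_state_err() decodes only bits 31:29 of the DMA pusher state
 * word (register 0x003228, read in nv04_fifo_dma_pusher() below); the
 * meaning of the remaining state bits is not decoded here.  "IB_EMPTY"
 * only applies to NV50+ parts, which fetch via an indirect buffer (the
 * ib_get/ib_put registers below) instead of a single pushbuffer.
 */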
static bool
nv04_fifo_swmthd(struct nv04_fifo_priv *priv, u32 chid, u32 addr, u32 data)
{
	struct nv04_fifo_chan *chan = NULL;
	struct nvkm_handle *bind;
	const int subc = (addr >> 13) & 0x7;
	const int mthd = addr & 0x1ffc;
	bool handled = false;
	unsigned long flags;
	u32 engine;

	spin_lock_irqsave(&priv->base.lock, flags);
	if (likely(chid >= priv->base.min && chid <= priv->base.max))
		chan = (void *)priv->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	switch (mthd) {
	case 0x0000:
		bind = nvkm_namedb_get(nv_namedb(chan), data);
		if (unlikely(!bind))
			break;

		if (nv_engidx(bind->object->engine) == NVDEV_ENGINE_SW) {
			engine = 0x0000000f << (subc * 4);
			chan->subc[subc] = data;
			handled = true;

			nv_mask(priv, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
		}

		nvkm_namedb_put(bind);
		break;
	default:
		engine = nv_rd32(priv, NV04_PFIFO_CACHE1_ENGINE);
		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
			break;

		bind = nvkm_namedb_get(nv_namedb(chan), chan->subc[subc]);
		if (likely(bind)) {
			if (!nv_call(bind->object, mthd, data))
				handled = true;
			nvkm_namedb_put(bind);
		}
		break;
	}

out:
	spin_unlock_irqrestore(&priv->base.lock, flags);
	return handled;
}

static void
nv04_fifo_cache_error(struct nvkm_device *device,
		      struct nv04_fifo_priv *priv, u32 chid, u32 get)
{
	u32 mthd, data;
	int ptr;

	/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my
	 * G80 chips, but CACHE1 isn't big enough for this much data.. Tests
	 * show that it wraps around to the start at GET=0x800.. No clue as to
	 * why..
	 */
	ptr = (get & 0x7ff) >> 2;

	if (device->card_type < NV_40) {
		mthd = nv_rd32(priv, NV04_PFIFO_CACHE1_METHOD(ptr));
		data = nv_rd32(priv, NV04_PFIFO_CACHE1_DATA(ptr));
	} else {
		mthd = nv_rd32(priv, NV40_PFIFO_CACHE1_METHOD(ptr));
		data = nv_rd32(priv, NV40_PFIFO_CACHE1_DATA(ptr));
	}

	if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
		const char *client_name =
			nvkm_client_name_for_fifo_chid(&priv->base, chid);
		nv_error(priv,
			 "CACHE_ERROR - ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
			 chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc,
			 data);
	}

	nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
	nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
		nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1);
	nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
		nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1);
	nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0);

	nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH,
		nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
}

static void
nv04_fifo_dma_pusher(struct nvkm_device *device,
		     struct nv04_fifo_priv *priv, u32 chid)
{
	const char *client_name;
	u32 dma_get = nv_rd32(priv, 0x003244);
	u32 dma_put = nv_rd32(priv, 0x003240);
	u32 push = nv_rd32(priv, 0x003220);
	u32 state = nv_rd32(priv, 0x003228);

	client_name = nvkm_client_name_for_fifo_chid(&priv->base, chid);

	if (device->card_type == NV_50) {
		u32 ho_get = nv_rd32(priv, 0x003328);
		u32 ho_put = nv_rd32(priv, 0x003320);
		u32 ib_get = nv_rd32(priv, 0x003334);
		u32 ib_put = nv_rd32(priv, 0x003330);

		nv_error(priv,
			 "DMA_PUSHER - ch %d [%s] get 0x%02x%08x put 0x%02x%08x ib_get 0x%08x ib_put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
			 chid, client_name, ho_get, dma_get, ho_put, dma_put,
			 ib_get, ib_put, state, nv_dma_state_err(state), push);

		/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
		nv_wr32(priv, 0x003364, 0x00000000);
		if (dma_get != dma_put || ho_get != ho_put) {
			nv_wr32(priv, 0x003244, dma_put);
			nv_wr32(priv, 0x003328, ho_put);
		} else
		if (ib_get != ib_put)
			nv_wr32(priv, 0x003334, ib_put);
	} else {
		nv_error(priv,
			 "DMA_PUSHER - ch %d [%s] get 0x%08x put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
			 chid, client_name, dma_get, dma_put, state,
			 nv_dma_state_err(state), push);

		if (dma_get != dma_put)
			nv_wr32(priv, 0x003244, dma_put);
	}

	nv_wr32(priv, 0x003228, 0x00000000);
	nv_wr32(priv, 0x003220, 0x00000001);
	nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
}
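/* Both handlers above recover by skipping rather than retrying:
 * nv04_fifo_cache_error() bumps CACHE1 GET past the faulting method
 * (get + 4) after giving nv04_fifo_swmthd() a chance to execute it in
 * software, and nv04_fifo_dma_pusher() jumps DMA GET forward to PUT
 * (discarding the commands in between) before clearing the state word
 * and re-enabling the pusher.  The software-method path decodes the
 * CACHE1 address word as:
 *
 *   subc = (addr >> 13) & 0x7;    three subchannel bits
 *   mthd = addr & 0x1ffc;         word-aligned method offset
 */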
void
nv04_fifo_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_device *device = nv_device(subdev);
	struct nv04_fifo_priv *priv = (void *)subdev;
	u32 mask = nv_rd32(priv, NV03_PFIFO_INTR_EN_0);
	u32 stat = nv_rd32(priv, NV03_PFIFO_INTR_0) & mask;
	u32 reassign, chid, get, sem;

	reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1;
	nv_wr32(priv, NV03_PFIFO_CACHES, 0);

	chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
	get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);

	if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
		nv04_fifo_cache_error(device, priv, chid, get);
		stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
	}

	if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
		nv04_fifo_dma_pusher(device, priv, chid);
		stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
	}

	if (stat & NV_PFIFO_INTR_SEMAPHORE) {
		stat &= ~NV_PFIFO_INTR_SEMAPHORE;
		nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);

		sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE);
		nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

		nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
		nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	if (device->card_type == NV_50) {
		if (stat & 0x00000010) {
			stat &= ~0x00000010;
			nv_wr32(priv, 0x002100, 0x00000010);
		}

		if (stat & 0x40000000) {
			nv_wr32(priv, 0x002100, 0x40000000);
			nvkm_fifo_uevent(&priv->base);
			stat &= ~0x40000000;
		}
	}

	if (stat) {
		nv_warn(priv, "unknown intr 0x%08x\n", stat);
		nv_mask(priv, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
		nv_wr32(priv, NV03_PFIFO_INTR_0, stat);
	}

	nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
}

static int
nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nv04_instmem_priv *imem = nv04_instmem(parent);
	struct nv04_fifo_priv *priv;
	int ret;

	ret = nvkm_fifo_create(parent, engine, oclass, 0, 15, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nvkm_ramht_ref(imem->ramht, &priv->ramht);
	nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
	nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);

	nv_subdev(priv)->unit = 0x00000100;
	nv_subdev(priv)->intr = nv04_fifo_intr;
	nv_engine(priv)->cclass = &nv04_fifo_cclass;
	nv_engine(priv)->sclass = nv04_fifo_sclass;
	priv->base.pause = nv04_fifo_pause;
	priv->base.start = nv04_fifo_start;
	priv->ramfc_desc = nv04_ramfc;
	return 0;
}

void
nv04_fifo_dtor(struct nvkm_object *object)
{
	struct nv04_fifo_priv *priv = (void *)object;
	nvkm_gpuobj_ref(NULL, &priv->ramfc);
	nvkm_gpuobj_ref(NULL, &priv->ramro);
	nvkm_ramht_ref(NULL, &priv->ramht);
	nvkm_fifo_destroy(&priv->base);
}

int
nv04_fifo_init(struct nvkm_object *object)
{
	struct nv04_fifo_priv *priv = (void *)object;
	int ret;

	ret = nvkm_fifo_init(&priv->base);
	if (ret)
		return ret;

	nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
	nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);

	nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
				       ((priv->ramht->bits - 9) << 16) |
					(priv->ramht->gpuobj.addr >> 8));
	nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
	nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8);

	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);

	nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
	nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);

	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
	return 0;
}

struct nvkm_oclass *
nv04_fifo_oclass = &(struct nvkm_oclass) {
	.handle = NV_ENGINE(FIFO, 0x04),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_fifo_ctor,
		.dtor = nv04_fifo_dtor,
		.init = nv04_fifo_init,
		.fini = _nvkm_fifo_fini,
	},
};
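/* A note on the NV03_PFIFO_RAMHT write in nv04_fifo_init() above: the
 * value packs three fields.  The search-depth interpretation follows
 * the in-line "search 128" comment; the rest is read straight from the
 * expression:
 *
 *   0x03 << 24               hash search depth (128 bytes)
 *   (bits - 9) << 16         table size, encoded relative to a
 *                            2^9-entry table
 *   gpuobj.addr >> 8         table base in instmem, in 256-byte units
 */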