/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv04.h"

#include <core/client.h>
#include <core/engctx.h>
#include <core/handle.h>
#include <core/ramht.h>
#include <subdev/instmem/nv04.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/unpack.h>

/* per-field layout of a channel's RAMFC entry: bit width, shift and
 * word offset within the entry, shift within the PFIFO register, and
 * the register each field shadows across context switches */
static struct ramfc_desc
nv04_ramfc[] = {
        { 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
        { 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
        { 16,  0, 0x08,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
        { 16, 16, 0x08,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
        { 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_STATE },
        { 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
        { 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_ENGINE },
        { 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_PULL1 },
        {}
};

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

int
nv04_fifo_object_attach(struct nvkm_object *parent,
                        struct nvkm_object *object, u32 handle)
{
        struct nv04_fifo_priv *priv = (void *)parent->engine;
        struct nv04_fifo_chan *chan = (void *)parent;
        u32 context, chid = chan->base.chid;
        int ret;

        if (nv_iclass(object, NV_GPUOBJ_CLASS))
                context = nv_gpuobj(object)->addr >> 4;
        else
                context = 0x00000004; /* just non-zero */

        switch (nv_engidx(object->engine)) {
        case NVDEV_ENGINE_DMAOBJ:
        case NVDEV_ENGINE_SW:
                context |= 0x00000000;
                break;
        case NVDEV_ENGINE_GR:
                context |= 0x00010000;
                break;
        case NVDEV_ENGINE_MPEG:
                context |= 0x00020000;
                break;
        default:
                return -EINVAL;
        }

        context |= 0x80000000; /* valid */
        context |= chid << 24;

        mutex_lock(&nv_subdev(priv)->mutex);
        ret = nvkm_ramht_insert(priv->ramht, chid, handle, context);
        mutex_unlock(&nv_subdev(priv)->mutex);
        return ret;
}

void
nv04_fifo_object_detach(struct nvkm_object *parent, int cookie)
{
        struct nv04_fifo_priv *priv = (void *)parent->engine;
        mutex_lock(&nv_subdev(priv)->mutex);
        nvkm_ramht_remove(priv->ramht, cookie);
        mutex_unlock(&nv_subdev(priv)->mutex);
}

int
nv04_fifo_context_attach(struct nvkm_object *parent,
                         struct nvkm_object *object)
{
        nv_engctx(object)->addr = nvkm_fifo_chan(parent)->chid;
        return 0;
}

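/* Allocate a DMA channel and seed its RAMFC entry: DMA_PUT and DMA_GET
 * both start at the requested pushbuf offset, DMA_INSTANCE points at the
 * pushbuf object, and DMA_FETCH gets the default trigger/size/max-request
 * settings (with byte-swapping enabled on big-endian hosts).
 */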
static int
nv04_fifo_chan_ctor(struct nvkm_object *parent,
                    struct nvkm_object *engine,
                    struct nvkm_oclass *oclass, void *data, u32 size,
                    struct nvkm_object **pobject)
{
        union {
                struct nv03_channel_dma_v0 v0;
        } *args = data;
        struct nv04_fifo_priv *priv = (void *)engine;
        struct nv04_fifo_chan *chan;
        int ret;

        nv_ioctl(parent, "create channel dma size %d\n", size);
        if (nvif_unpack(args->v0, 0, 0, false)) {
                nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
                                 "offset %016llx\n", args->v0.version,
                         args->v0.pushbuf, args->v0.offset);
        } else
                return ret; /* ret is assigned by the nvif_unpack() macro */

        ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
                                       0x10000, args->v0.pushbuf,
                                       (1ULL << NVDEV_ENGINE_DMAOBJ) |
                                       (1ULL << NVDEV_ENGINE_SW) |
                                       (1ULL << NVDEV_ENGINE_GR), &chan);
        *pobject = nv_object(chan);
        if (ret)
                return ret;

        args->v0.chid = chan->base.chid;

        nv_parent(chan)->object_attach = nv04_fifo_object_attach;
        nv_parent(chan)->object_detach = nv04_fifo_object_detach;
        nv_parent(chan)->context_attach = nv04_fifo_context_attach;
        chan->ramfc = chan->base.chid * 32;

        nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
        nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
        nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
        nv_wo32(priv->ramfc, chan->ramfc + 0x10,
                             NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
                             NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
                             NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
                             NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
        return 0;
}

void
nv04_fifo_chan_dtor(struct nvkm_object *object)
{
        struct nv04_fifo_priv *priv = (void *)object->engine;
        struct nv04_fifo_chan *chan = (void *)object;
        struct ramfc_desc *c = priv->ramfc_desc;

        do {
                nv_wo32(priv->ramfc, chan->ramfc + c->ctxp, 0x00000000);
        } while ((++c)->bits);

        nvkm_fifo_channel_destroy(&chan->base);
}

int
nv04_fifo_chan_init(struct nvkm_object *object)
{
        struct nv04_fifo_priv *priv = (void *)object->engine;
        struct nv04_fifo_chan *chan = (void *)object;
        u32 mask = 1 << chan->base.chid;
        unsigned long flags;
        int ret;

        ret = nvkm_fifo_channel_init(&chan->base);
        if (ret)
                return ret;

        spin_lock_irqsave(&priv->base.lock, flags);
        nv_mask(priv, NV04_PFIFO_MODE, mask, mask);
        spin_unlock_irqrestore(&priv->base.lock, flags);
        return 0;
}

int
nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
        struct nv04_fifo_priv *priv = (void *)object->engine;
        struct nv04_fifo_chan *chan = (void *)object;
        struct nvkm_gpuobj *fctx = priv->ramfc;
        struct ramfc_desc *c;
        unsigned long flags;
        u32 data = chan->ramfc;
        u32 chid;

        /* prevent fifo context switches */
        spin_lock_irqsave(&priv->base.lock, flags);
        nv_wr32(priv, NV03_PFIFO_CACHES, 0);

        /* if this channel is active, replace it with a null context */
        chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
        if (chid == chan->base.chid) {
                nv_mask(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
                nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 0);
                nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);

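                /* save each field of the channel's register state back
                 * into its RAMFC entry, as described by ramfc_desc */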
                c = priv->ramfc_desc;
                do {
                        u32 rm = ((1ULL << c->bits) - 1) << c->regs;
                        u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
                        u32 rv = (nv_rd32(priv, c->regp) & rm) >> c->regs;
                        u32 cv = (nv_ro32(fctx, c->ctxp + data) & ~cm);
                        nv_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
                } while ((++c)->bits);

                c = priv->ramfc_desc;
                do {
                        nv_wr32(priv, c->regp, 0x00000000);
                } while ((++c)->bits);

                nv_wr32(priv, NV03_PFIFO_CACHE1_GET, 0);
                nv_wr32(priv, NV03_PFIFO_CACHE1_PUT, 0);
                nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
                nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
                nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
        }

        /* restore normal operation, after disabling dma mode */
        nv_mask(priv, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
        nv_wr32(priv, NV03_PFIFO_CACHES, 1);
        spin_unlock_irqrestore(&priv->base.lock, flags);

        return nvkm_fifo_channel_fini(&chan->base, suspend);
}

static struct nvkm_ofuncs
nv04_fifo_ofuncs = {
        .ctor = nv04_fifo_chan_ctor,
        .dtor = nv04_fifo_chan_dtor,
        .init = nv04_fifo_chan_init,
        .fini = nv04_fifo_chan_fini,
        .map = _nvkm_fifo_channel_map,
        .rd32 = _nvkm_fifo_channel_rd32,
        .wr32 = _nvkm_fifo_channel_wr32,
        .ntfy = _nvkm_fifo_channel_ntfy
};

static struct nvkm_oclass
nv04_fifo_sclass[] = {
        { NV03_CHANNEL_DMA, &nv04_fifo_ofuncs },
        {}
};

/*******************************************************************************
 * FIFO context - basically just the instmem reserved for the channel
 ******************************************************************************/

int
nv04_fifo_context_ctor(struct nvkm_object *parent,
                       struct nvkm_object *engine,
                       struct nvkm_oclass *oclass, void *data, u32 size,
                       struct nvkm_object **pobject)
{
        struct nv04_fifo_base *base;
        int ret;

        ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
                                       0x1000, NVOBJ_FLAG_HEAP, &base);
        *pobject = nv_object(base);
        if (ret)
                return ret;

        return 0;
}

static struct nvkm_oclass
nv04_fifo_cclass = {
        .handle = NV_ENGCTX(FIFO, 0x04),
        .ofuncs = &(struct nvkm_ofuncs) {
                .ctor = nv04_fifo_context_ctor,
                .dtor = _nvkm_fifo_context_dtor,
                .init = _nvkm_fifo_context_init,
                .fini = _nvkm_fifo_context_fini,
                .rd32 = _nvkm_fifo_context_rd32,
                .wr32 = _nvkm_fifo_context_wr32,
        },
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

void
nv04_fifo_pause(struct nvkm_fifo *pfifo, unsigned long *pflags)
__acquires(priv->base.lock)
{
        struct nv04_fifo_priv *priv = (void *)pfifo;
        unsigned long flags;

        spin_lock_irqsave(&priv->base.lock, flags);
        *pflags = flags;

        nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000000);
        nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);

        /* in some cases the puller may be left in an inconsistent state
         * if you try to stop it while it's busy translating handles.
         * sometimes you get a CACHE_ERROR, sometimes it just fails
         * silently, sending incorrect instance offsets to PGRAPH after
         * it's started up again.
         *
         * to avoid this, we invalidate the most recently calculated
         * instance.
         */
        if (!nv_wait(priv, NV04_PFIFO_CACHE1_PULL0,
                           NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0x00000000))
                nv_warn(priv, "timeout idling puller\n");

        if (nv_rd32(priv, NV04_PFIFO_CACHE1_PULL0) &
                          NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
                nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

        nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0x00000000);
}

void
nv04_fifo_start(struct nvkm_fifo *pfifo, unsigned long *pflags)
__releases(priv->base.lock)
{
        struct nv04_fifo_priv *priv = (void *)pfifo;
        unsigned long flags = *pflags;

        nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
        nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000001);

        spin_unlock_irqrestore(&priv->base.lock, flags);
}

static const char *
nv_dma_state_err(u32 state)
{
        static const char * const desc[] = {
                "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
                "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
        };
        return desc[(state >> 29) & 0x7];
}

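/* Try to execute a method in software.  Method 0x0000 binds an object to
 * a subchannel; if the named object belongs to the software engine, the
 * bind is recorded here and the subchannel's engine bits are cleared.
 * Any other method is forwarded via nv_call() to the object bound to the
 * subchannel, provided the subchannel is currently routed to the software
 * engine (engine bits zero).  Returns true if the method was handled.
 */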
static bool
nv04_fifo_swmthd(struct nv04_fifo_priv *priv, u32 chid, u32 addr, u32 data)
{
        struct nv04_fifo_chan *chan = NULL;
        struct nvkm_handle *bind;
        const int subc = (addr >> 13) & 0x7;
        const int mthd = addr & 0x1ffc;
        bool handled = false;
        unsigned long flags;
        u32 engine;

        spin_lock_irqsave(&priv->base.lock, flags);
        if (likely(chid >= priv->base.min && chid <= priv->base.max))
                chan = (void *)priv->base.channel[chid];
        if (unlikely(!chan))
                goto out;

        switch (mthd) {
        case 0x0000:
                bind = nvkm_namedb_get(nv_namedb(chan), data);
                if (unlikely(!bind))
                        break;

                if (nv_engidx(bind->object->engine) == NVDEV_ENGINE_SW) {
                        engine = 0x0000000f << (subc * 4);
                        chan->subc[subc] = data;
                        handled = true;

                        nv_mask(priv, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
                }

                nvkm_namedb_put(bind);
                break;
        default:
                engine = nv_rd32(priv, NV04_PFIFO_CACHE1_ENGINE);
                if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
                        break;

                bind = nvkm_namedb_get(nv_namedb(chan), chan->subc[subc]);
                if (likely(bind)) {
                        if (!nv_call(bind->object, mthd, data))
                                handled = true;
                        nvkm_namedb_put(bind);
                }
                break;
        }

out:
        spin_unlock_irqrestore(&priv->base.lock, flags);
        return handled;
}

static void
nv04_fifo_cache_error(struct nvkm_device *device,
                      struct nv04_fifo_priv *priv, u32 chid, u32 get)
{
        u32 mthd, data;
        int ptr;

        /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my
         * G80 chips, but CACHE1 isn't big enough for this much data.. Tests
         * show that it wraps around to the start at GET=0x800.. No clue as to
         * why..
         */
        ptr = (get & 0x7ff) >> 2;

        if (device->card_type < NV_40) {
                mthd = nv_rd32(priv, NV04_PFIFO_CACHE1_METHOD(ptr));
                data = nv_rd32(priv, NV04_PFIFO_CACHE1_DATA(ptr));
        } else {
                mthd = nv_rd32(priv, NV40_PFIFO_CACHE1_METHOD(ptr));
                data = nv_rd32(priv, NV40_PFIFO_CACHE1_DATA(ptr));
        }

        if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
                const char *client_name =
                        nvkm_client_name_for_fifo_chid(&priv->base, chid);
                nv_error(priv,
                         "CACHE_ERROR - ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
                         chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc,
                         data);
        }

        /* drop the faulting entry from the cache and restart the puller */
        nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
        nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

        nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
                nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1);
        nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
        nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
                nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1);
        nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0);

        nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH,
                nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
        nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
}

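/* Report a DMA_PUSHER fault and recover by skipping past the offending
 * data: GET (and the high-order/IB pointers on NV50) is advanced to PUT,
 * DMA_STATE is cleared, and the pusher is re-enabled.
 */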
static void
nv04_fifo_dma_pusher(struct nvkm_device *device,
                     struct nv04_fifo_priv *priv, u32 chid)
{
        const char *client_name;
        u32 dma_get = nv_rd32(priv, 0x003244);
        u32 dma_put = nv_rd32(priv, 0x003240);
        u32 push = nv_rd32(priv, 0x003220);
        u32 state = nv_rd32(priv, 0x003228);

        client_name = nvkm_client_name_for_fifo_chid(&priv->base, chid);

        if (device->card_type == NV_50) {
                u32 ho_get = nv_rd32(priv, 0x003328);
                u32 ho_put = nv_rd32(priv, 0x003320);
                u32 ib_get = nv_rd32(priv, 0x003334);
                u32 ib_put = nv_rd32(priv, 0x003330);

                nv_error(priv,
                         "DMA_PUSHER - ch %d [%s] get 0x%02x%08x put 0x%02x%08x ib_get 0x%08x ib_put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
                         chid, client_name, ho_get, dma_get, ho_put, dma_put,
                         ib_get, ib_put, state, nv_dma_state_err(state), push);

                /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
                nv_wr32(priv, 0x003364, 0x00000000);
                if (dma_get != dma_put || ho_get != ho_put) {
                        nv_wr32(priv, 0x003244, dma_put);
                        nv_wr32(priv, 0x003328, ho_put);
                } else
                if (ib_get != ib_put)
                        nv_wr32(priv, 0x003334, ib_put);
        } else {
                nv_error(priv,
                         "DMA_PUSHER - ch %d [%s] get 0x%08x put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
                         chid, client_name, dma_get, dma_put, state,
                         nv_dma_state_err(state), push);

                if (dma_get != dma_put)
                        nv_wr32(priv, 0x003244, dma_put);
        }

        nv_wr32(priv, 0x003228, 0x00000000);
        nv_wr32(priv, 0x003220, 0x00000001);
        nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
}

void
nv04_fifo_intr(struct nvkm_subdev *subdev)
{
        struct nvkm_device *device = nv_device(subdev);
        struct nv04_fifo_priv *priv = (void *)subdev;
        u32 mask = nv_rd32(priv, NV03_PFIFO_INTR_EN_0);
        u32 stat = nv_rd32(priv, NV03_PFIFO_INTR_0) & mask;
        u32 reassign, chid, get, sem;

        reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1;
        nv_wr32(priv, NV03_PFIFO_CACHES, 0);

        chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
        get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);

        if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
                nv04_fifo_cache_error(device, priv, chid, get);
                stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
        }

        if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
                nv04_fifo_dma_pusher(device, priv, chid);
                stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
        }

        if (stat & NV_PFIFO_INTR_SEMAPHORE) {
                stat &= ~NV_PFIFO_INTR_SEMAPHORE;
                nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);

                sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE);
                nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

                nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
                nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
        }

        if (device->card_type == NV_50) {
                if (stat & 0x00000010) {
                        stat &= ~0x00000010;
                        nv_wr32(priv, 0x002100, 0x00000010);
                }

                if (stat & 0x40000000) {
                        nv_wr32(priv, 0x002100, 0x40000000);
                        nvkm_fifo_uevent(&priv->base);
                        stat &= ~0x40000000;
                }
        }

        if (stat) {
                nv_warn(priv, "unknown intr 0x%08x\n", stat);
                nv_mask(priv, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
                nv_wr32(priv, NV03_PFIFO_INTR_0, stat);
        }

        nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
}

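/* Engine constructor: registers channels 0..15 and takes references on
 * the RAMHT, RAMRO and RAMFC objects pre-allocated by the NV04 instmem
 * subdev.
 */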
static int
nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
               struct nvkm_oclass *oclass, void *data, u32 size,
               struct nvkm_object **pobject)
{
        struct nv04_instmem *imem = nv04_instmem(parent);
        struct nv04_fifo_priv *priv;
        int ret;

        ret = nvkm_fifo_create(parent, engine, oclass, 0, 15, &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;

        nvkm_ramht_ref(imem->ramht, &priv->ramht);
        nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
        nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);

        nv_subdev(priv)->unit = 0x00000100;
        nv_subdev(priv)->intr = nv04_fifo_intr;
        nv_engine(priv)->cclass = &nv04_fifo_cclass;
        nv_engine(priv)->sclass = nv04_fifo_sclass;
        priv->base.pause = nv04_fifo_pause;
        priv->base.start = nv04_fifo_start;
        priv->ramfc_desc = nv04_ramfc;
        return 0;
}

void
nv04_fifo_dtor(struct nvkm_object *object)
{
        struct nv04_fifo_priv *priv = (void *)object;
        nvkm_gpuobj_ref(NULL, &priv->ramfc);
        nvkm_gpuobj_ref(NULL, &priv->ramro);
        nvkm_ramht_ref(NULL, &priv->ramht);
        nvkm_fifo_destroy(&priv->base);
}

int
nv04_fifo_init(struct nvkm_object *object)
{
        struct nv04_fifo_priv *priv = (void *)object;
        int ret;

        ret = nvkm_fifo_init(&priv->base);
        if (ret)
                return ret;

        nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
        nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);

        nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
                                       ((priv->ramht->bits - 9) << 16) |
                                        (priv->ramht->gpuobj.addr >> 8));
        nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
        nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8);

        nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);

        nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
        nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);

        nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
        nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
        nv_wr32(priv, NV03_PFIFO_CACHES, 1);
        return 0;
}

struct nvkm_oclass *
nv04_fifo_oclass = &(struct nvkm_oclass) {
        .handle = NV_ENGINE(FIFO, 0x04),
        .ofuncs = &(struct nvkm_ofuncs) {
                .ctor = nv04_fifo_ctor,
                .dtor = nv04_fifo_dtor,
                .init = nv04_fifo_init,
                .fini = _nvkm_fifo_fini,
        },
};