/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv04.h"

#include <core/client.h>
#include <core/engctx.h>
#include <core/handle.h>
#include <core/ramht.h>
#include <subdev/instmem.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/unpack.h>

static struct ramfc_desc
nv04_ramfc[] = {
	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
	{ 16,  0, 0x08,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
	{ 16, 16, 0x08,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
	{ 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_STATE },
	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_ENGINE },
	{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_PULL1 },
	{}
};

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

int
nv04_fifo_object_attach(struct nvkm_object *parent,
			struct nvkm_object *object, u32 handle)
{
	struct nv04_fifo *fifo = (void *)parent->engine;
	struct nv04_fifo_chan *chan = (void *)parent;
	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
	u32 context, chid = chan->base.chid;
	int ret;

	if (nv_iclass(object, NV_GPUOBJ_CLASS))
		context = nv_gpuobj(object)->addr >> 4;
	else
		context = 0x00000004; /* just non-zero */

	if (object->engine) {
		switch (nv_engidx(object->engine)) {
		case NVDEV_ENGINE_DMAOBJ:
		case NVDEV_ENGINE_SW:
			context |= 0x00000000;
			break;
		case NVDEV_ENGINE_GR:
			context |= 0x00010000;
			break;
		case NVDEV_ENGINE_MPEG:
			context |= 0x00020000;
			break;
		default:
			return -EINVAL;
		}
	}

	context |= 0x80000000; /* valid */
	context |= chid << 24;

	mutex_lock(&nv_subdev(fifo)->mutex);
	ret = nvkm_ramht_insert(imem->ramht, NULL, chid, 0, handle, context);
	mutex_unlock(&nv_subdev(fifo)->mutex);
	return ret;
}

void
nv04_fifo_object_detach(struct nvkm_object *parent, int cookie)
{
	struct nv04_fifo *fifo = (void *)parent->engine;
	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
	mutex_lock(&nv_subdev(fifo)->mutex);
	nvkm_ramht_remove(imem->ramht, cookie);
	mutex_unlock(&nv_subdev(fifo)->mutex);
}

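/* Note: the RAMHT context word assembled by nv04_fifo_object_attach() above
 * packs, roughly:
 *   bit  31     valid
 *   bits 24+    channel id
 *   bits 16-17  engine (0 = SW/DMAOBJ, 1 = GR, 2 = MPEG)
 *   low bits    object instance address >> 4 (or a non-zero dummy value)
 * This only summarises what the code writes; exact field widths differ
 * between chipsets and are not spelled out here.
 */
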
int
nv04_fifo_context_attach(struct nvkm_object *parent,
			 struct nvkm_object *object)
{
	nv_engctx(object)->addr = nvkm_fifo_chan(parent)->chid;
	return 0;
}

static int
nv04_fifo_chan_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv03_channel_dma_v0 v0;
	} *args = data;
	struct nv04_fifo *fifo = (void *)engine;
	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
	struct nv04_fifo_chan *chan;
	int ret;

	nvif_ioctl(parent, "create channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
				   "offset %08x\n", args->v0.version,
			   args->v0.pushbuf, args->v0.offset);
	} else
		return ret;

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
				       0x10000, args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
	chan->ramfc = chan->base.chid * 32;

	nvkm_kmap(imem->ramfc);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x10,
			       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
			       NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	nvkm_done(imem->ramfc);
	return 0;
}

void
nv04_fifo_chan_dtor(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
	struct ramfc_desc *c = fifo->ramfc_desc;

	nvkm_kmap(imem->ramfc);
	do {
		nvkm_wo32(imem->ramfc, chan->ramfc + c->ctxp, 0x00000000);
	} while ((++c)->bits);
	nvkm_done(imem->ramfc);

	nvkm_fifo_channel_destroy(&chan->base);
}

int
nv04_fifo_chan_init(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = 1 << chan->base.chid;
	unsigned long flags;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_mask(device, NV04_PFIFO_MODE, mask, mask);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return 0;
}

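/* nv04_fifo_chan_fini() below saves the channel's PFIFO state.  If the
 * channel is currently resident in CACHE1, each register listed in the
 * ramfc_desc table is read back into the matching bitfield of the channel's
 * RAMFC entry, the registers are then cleared, and the null channel
 * (fifo->base.max) is loaded in its place before context switches are
 * re-enabled.
 */
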
int
nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nv04_fifo *fifo = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_memory *fctx = device->imem->ramfc;
	struct ramfc_desc *c;
	unsigned long flags;
	u32 data = chan->ramfc;
	u32 chid;

	/* prevent fifo context switches */
	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

	/* if this channel is active, replace it with a null context */
	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max;
	if (chid == chan->base.chid) {
		nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
		nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);

		c = fifo->ramfc_desc;
		do {
			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
			u32 rv = (nvkm_rd32(device, c->regp) &  rm) >> c->regs;
			u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
			nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
		} while ((++c)->bits);

		c = fifo->ramfc_desc;
		do {
			nvkm_wr32(device, c->regp, 0x00000000);
		} while ((++c)->bits);

		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	/* restore normal operation, after disabling dma mode */
	nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	return nvkm_fifo_channel_fini(&chan->base, suspend);
}

static struct nvkm_ofuncs
nv04_fifo_ofuncs = {
	.ctor = nv04_fifo_chan_ctor,
	.dtor = nv04_fifo_chan_dtor,
	.init = nv04_fifo_chan_init,
	.fini = nv04_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};

static struct nvkm_oclass
nv04_fifo_sclass[] = {
	{ NV03_CHANNEL_DMA, &nv04_fifo_ofuncs },
	{}
};

/*******************************************************************************
 * FIFO context - basically just the instmem reserved for the channel
 ******************************************************************************/

int
nv04_fifo_context_ctor(struct nvkm_object *parent,
		       struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, void *data, u32 size,
		       struct nvkm_object **pobject)
{
	struct nv04_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
				       0x1000, NVOBJ_FLAG_HEAP, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	return 0;
}

static struct nvkm_oclass
nv04_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x04),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_fifo_context_ctor,
		.dtor = _nvkm_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

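/* nv04_fifo_pause()/nv04_fifo_start() below bracket accesses that must not
 * race with PFIFO: pause takes the fifo lock, disables context reassignment
 * and the CACHE1 puller, waits (up to 2ms) for any in-flight hash lookup to
 * finish and invalidates the cached hash result; start re-enables the puller
 * and reassignment and releases the lock taken by pause.
 */
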
void
nv04_fifo_pause(struct nvkm_fifo *obj, unsigned long *pflags)
__acquires(fifo->base.lock)
{
	struct nv04_fifo *fifo = container_of(obj, typeof(*fifo), base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags;

	spin_lock_irqsave(&fifo->base.lock, flags);
	*pflags = flags;

	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);

	/* in some cases the puller may be left in an inconsistent state
	 * if you try to stop it while it's busy translating handles.
	 * sometimes you get a CACHE_ERROR, sometimes it just fails
	 * silently; sending incorrect instance offsets to PGRAPH after
	 * it's started up again.
	 *
	 * to avoid this, we invalidate the most recently calculated
	 * instance.
	 */
	nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0);
		if (!(tmp & NV04_PFIFO_CACHE1_PULL0_HASH_BUSY))
			break;
	);

	if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) &
			      NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0x00000000);
}

void
nv04_fifo_start(struct nvkm_fifo *obj, unsigned long *pflags)
__releases(fifo->base.lock)
{
	struct nv04_fifo *fifo = container_of(obj, typeof(*fifo), base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags = *pflags;

	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);

	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static const char *
nv_dma_state_err(u32 state)
{
	static const char * const desc[] = {
		"NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
		"INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
	};
	return desc[(state >> 29) & 0x7];
}

static bool
nv04_fifo_swmthd(struct nv04_fifo *fifo, u32 chid, u32 addr, u32 data)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nv04_fifo_chan *chan = NULL;
	struct nvkm_handle *bind;
	const int subc = (addr >> 13) & 0x7;
	const int mthd = addr & 0x1ffc;
	bool handled = false;
	unsigned long flags;
	u32 engine;

	spin_lock_irqsave(&fifo->base.lock, flags);
	if (likely(chid >= fifo->base.min && chid <= fifo->base.max))
		chan = (void *)fifo->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	switch (mthd) {
	case 0x0000:
		bind = nvkm_namedb_get(nv_namedb(chan), data);
		if (unlikely(!bind))
			break;

		if (nv_engidx(bind->object->engine) == NVDEV_ENGINE_SW) {
			engine = 0x0000000f << (subc * 4);
			chan->subc[subc] = data;
			handled = true;

			nvkm_mask(device, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
		}

		nvkm_namedb_put(bind);
		break;
	default:
		engine = nvkm_rd32(device, NV04_PFIFO_CACHE1_ENGINE);
		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
			break;

		bind = nvkm_namedb_get(nv_namedb(chan), chan->subc[subc]);
		if (likely(bind)) {
			if (!nv_call(bind->object, mthd, data))
				handled = true;
			nvkm_namedb_put(bind);
		}
		break;
	}

out:
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return handled;
}

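/* CACHE_ERROR recovery below: the offending method/data pair is read out of
 * CACHE1 at the current GET pointer and offered to nv04_fifo_swmthd() in case
 * it is a software method; either way the entry is skipped (GET advanced by
 * one 4-byte slot, with PUSH0 briefly disabled), the hash cache is
 * invalidated, and the pusher/puller are re-enabled so the FIFO can make
 * progress again.
 */
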
static void
nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mthd, data;
	int ptr;

	/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my
	 * G80 chips, but CACHE1 isn't big enough for this much data.. Tests
	 * show that it wraps around to the start at GET=0x800.. No clue as to
	 * why..
	 */
	ptr = (get & 0x7ff) >> 2;

	if (device->card_type < NV_40) {
		mthd = nvkm_rd32(device, NV04_PFIFO_CACHE1_METHOD(ptr));
		data = nvkm_rd32(device, NV04_PFIFO_CACHE1_DATA(ptr));
	} else {
		mthd = nvkm_rd32(device, NV40_PFIFO_CACHE1_METHOD(ptr));
		data = nvkm_rd32(device, NV40_PFIFO_CACHE1_DATA(ptr));
	}

	if (!nv04_fifo_swmthd(fifo, chid, mthd, data)) {
		const char *client_name =
			nvkm_client_name_for_fifo_chid(&fifo->base, chid);
		nvkm_error(subdev, "CACHE_ERROR - "
			   "ch %d [%s] subc %d mthd %04x data %08x\n",
			   chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc,
			   data);
	}

	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
	nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) & ~1);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) | 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0);

	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH,
		nvkm_rd32(device, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
}

static void
nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 dma_get = nvkm_rd32(device, 0x003244);
	u32 dma_put = nvkm_rd32(device, 0x003240);
	u32 push = nvkm_rd32(device, 0x003220);
	u32 state = nvkm_rd32(device, 0x003228);
	const char *client_name;

	client_name = nvkm_client_name_for_fifo_chid(&fifo->base, chid);

	if (device->card_type == NV_50) {
		u32 ho_get = nvkm_rd32(device, 0x003328);
		u32 ho_put = nvkm_rd32(device, 0x003320);
		u32 ib_get = nvkm_rd32(device, 0x003334);
		u32 ib_put = nvkm_rd32(device, 0x003330);

		nvkm_error(subdev, "DMA_PUSHER - "
			   "ch %d [%s] get %02x%08x put %02x%08x ib_get %08x "
			   "ib_put %08x state %08x (err: %s) push %08x\n",
			   chid, client_name, ho_get, dma_get, ho_put, dma_put,
			   ib_get, ib_put, state, nv_dma_state_err(state),
			   push);

		/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
		nvkm_wr32(device, 0x003364, 0x00000000);
		if (dma_get != dma_put || ho_get != ho_put) {
			nvkm_wr32(device, 0x003244, dma_put);
			nvkm_wr32(device, 0x003328, ho_put);
		} else
		if (ib_get != ib_put)
			nvkm_wr32(device, 0x003334, ib_put);
	} else {
		nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x "
				   "state %08x (err: %s) push %08x\n",
			   chid, client_name, dma_get, dma_put, state,
			   nv_dma_state_err(state), push);

		if (dma_get != dma_put)
			nvkm_wr32(device, 0x003244, dma_put);
	}

	nvkm_wr32(device, 0x003228, 0x00000000);
	nvkm_wr32(device, 0x003220, 0x00000001);
	nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
}

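/* Top-level PFIFO interrupt handler.  Context reassignment is disabled while
 * the handler runs and restored on exit.  CACHE_ERROR and DMA_PUSHER are
 * delegated to the helpers above; SEMAPHORE is acked by setting bit 0 of the
 * semaphore register and skipping the offending entry; on NV50-class chips
 * bit 30 is forwarded as a fifo uevent.  Any remaining bits are logged once
 * and then masked off in INTR_EN_0.
 */
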
void
nv04_fifo_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_device *device = subdev->device;
	struct nv04_fifo *fifo = (void *)subdev;
	u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0);
	u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask;
	u32 reassign, chid, get, sem;

	reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1;
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max;
	get = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET);

	if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
		nv04_fifo_cache_error(fifo, chid, get);
		stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
	}

	if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
		nv04_fifo_dma_pusher(fifo, chid);
		stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
	}

	if (stat & NV_PFIFO_INTR_SEMAPHORE) {
		stat &= ~NV_PFIFO_INTR_SEMAPHORE;
		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);

		sem = nvkm_rd32(device, NV10_PFIFO_CACHE1_SEMAPHORE);
		nvkm_wr32(device, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	if (device->card_type == NV_50) {
		if (stat & 0x00000010) {
			stat &= ~0x00000010;
			nvkm_wr32(device, 0x002100, 0x00000010);
		}

		if (stat & 0x40000000) {
			nvkm_wr32(device, 0x002100, 0x40000000);
			nvkm_fifo_uevent(&fifo->base);
			stat &= ~0x40000000;
		}
	}

	if (stat) {
		nvkm_warn(subdev, "intr %08x\n", stat);
		nvkm_mask(device, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
		nvkm_wr32(device, NV03_PFIFO_INTR_0, stat);
	}

	nvkm_wr32(device, NV03_PFIFO_CACHES, reassign);
}

static int
nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nv04_fifo *fifo;
	int ret;

	ret = nvkm_fifo_create(parent, engine, oclass, 0, 15, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	nv_subdev(fifo)->unit = 0x00000100;
	nv_subdev(fifo)->intr = nv04_fifo_intr;
	nv_engine(fifo)->cclass = &nv04_fifo_cclass;
	nv_engine(fifo)->sclass = nv04_fifo_sclass;
	fifo->base.pause = nv04_fifo_pause;
	fifo->base.start = nv04_fifo_start;
	fifo->ramfc_desc = nv04_ramfc;
	return 0;
}

void
nv04_fifo_dtor(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object;
	nvkm_fifo_destroy(&fifo->base);
}

int
nv04_fifo_init(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	struct nvkm_ramht *ramht = imem->ramht;
	struct nvkm_memory *ramro = imem->ramro;
	struct nvkm_memory *ramfc = imem->ramfc;
	int ret;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

	nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
	nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);

	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
					    ((ramht->bits - 9) << 16) |
					     (ramht->gpuobj->addr >> 8));
	nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
	nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);

	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
	return 0;
}

struct nvkm_oclass *
nv04_fifo_oclass = &(struct nvkm_oclass) {
	.handle = NV_ENGINE(FIFO, 0x04),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_fifo_ctor,
		.dtor = nv04_fifo_dtor,
		.init = nv04_fifo_init,
		.fini = _nvkm_fifo_fini,
	},
};