/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv50.h"
#include "nv04.h"

#include <core/client.h>
#include <core/engctx.h>
#include <core/ramht.h>
#include <subdev/bar.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/unpack.h>

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

/* Point the channel's per-engine context entry at the new engine context. */
static int
g84_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct nv50_fifo_base *base = (void *)parent->parent;
	struct nvkm_gpuobj *ectx = (void *)object;
	u64 limit = ectx->addr + ectx->size - 1;
	u64 start = ectx->addr;
	u32 addr;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_GR    : addr = 0x0020; break;
	case NVDEV_ENGINE_VP    :
	case NVDEV_ENGINE_MSPDEC: addr = 0x0040; break;
	case NVDEV_ENGINE_MSPPP :
	case NVDEV_ENGINE_MPEG  : addr = 0x0060; break;
	case NVDEV_ENGINE_BSP   :
	case NVDEV_ENGINE_MSVLD : addr = 0x0080; break;
	case NVDEV_ENGINE_CIPHER:
	case NVDEV_ENGINE_SEC   : addr = 0x00a0; break;
	case NVDEV_ENGINE_CE0   : addr = 0x00c0; break;
	default:
		return -EINVAL;
	}

	nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
	nv_wo32(base->eng, addr + 0x00, 0x00190000);
	nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
	nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
	nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
					upper_32_bits(start));
	nv_wo32(base->eng, addr + 0x10, 0x00000000);
	nv_wo32(base->eng, addr + 0x14, 0x00000000);
	bar->flush(bar);
	return 0;
}

/* Kick the channel's context off the engine, then clear its context entry. */
static int
g84_fifo_context_detach(struct nvkm_object *parent, bool suspend,
			struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct nv50_fifo_priv *priv = (void *)parent->engine;
	struct nv50_fifo_base *base = (void *)parent->parent;
	struct nv50_fifo_chan *chan = (void *)parent;
	u32 addr, save, engn;
	bool done;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_GR    : engn = 0; addr = 0x0020; break;
	case NVDEV_ENGINE_VP    :
	case NVDEV_ENGINE_MSPDEC: engn = 3; addr = 0x0040; break;
	case NVDEV_ENGINE_MSPPP :
	case NVDEV_ENGINE_MPEG  : engn = 1; addr = 0x0060; break;
	case NVDEV_ENGINE_BSP   :
	case NVDEV_ENGINE_MSVLD : engn = 5; addr = 0x0080; break;
	case NVDEV_ENGINE_CIPHER:
	case NVDEV_ENGINE_SEC   : engn = 4; addr = 0x00a0; break;
	case NVDEV_ENGINE_CE0   : engn = 2; addr = 0x00c0; break;
	default:
		return -EINVAL;
	}

	save = nv_mask(priv, 0x002520, 0x0000003f, 1 << engn);
	nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
	done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff);
	nv_wr32(priv, 0x002520, save);
	if (!done) {
		nv_error(priv, "channel %d [%s] unload timeout\n",
			 chan->base.chid, nvkm_client_name(chan));
		if (suspend)
			return -EBUSY;
	}

	nv_wo32(base->eng, addr + 0x00, 0x00000000);
	nv_wo32(base->eng, addr + 0x04, 0x00000000);
	nv_wo32(base->eng, addr + 0x08, 0x00000000);
	nv_wo32(base->eng, addr + 0x0c, 0x00000000);
	nv_wo32(base->eng, addr + 0x10, 0x00000000);
	nv_wo32(base->eng, addr + 0x14, 0x00000000);
	bar->flush(bar);
	return 0;
}

/* Insert an object handle into the channel's RAMHT, tagged with its engine. */
static int
g84_fifo_object_attach(struct nvkm_object *parent,
		       struct nvkm_object *object, u32 handle)
{
	struct nv50_fifo_chan *chan = (void *)parent;
	u32 context;

	if (nv_iclass(object, NV_GPUOBJ_CLASS))
		context = nv_gpuobj(object)->node->offset >> 4;
	else
		context = 0x00000004; /* just non-zero */

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_DMAOBJ:
	case NVDEV_ENGINE_SW    : context |= 0x00000000; break;
	case NVDEV_ENGINE_GR    : context |= 0x00100000; break;
	case NVDEV_ENGINE_MPEG  :
	case NVDEV_ENGINE_MSPPP : context |= 0x00200000; break;
	case NVDEV_ENGINE_ME    :
	case NVDEV_ENGINE_CE0   : context |= 0x00300000; break;
	case NVDEV_ENGINE_VP    :
	case NVDEV_ENGINE_MSPDEC: context |= 0x00400000; break;
	case NVDEV_ENGINE_CIPHER:
	case NVDEV_ENGINE_SEC   :
	case NVDEV_ENGINE_VIC   : context |= 0x00500000; break;
	case NVDEV_ENGINE_BSP   :
	case NVDEV_ENGINE_MSVLD : context |= 0x00600000; break;
	default:
		return -EINVAL;
	}

	return nvkm_ramht_insert(chan->ramht, 0, handle, context);
}

/* Create a DMA-mode channel and fill in its RAMFC. */
static int
g84_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, void *data, u32 size,
		       struct nvkm_object **pobject)
{
	union {
		struct nv03_channel_dma_v0 v0;
	} *args = data;
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct nv50_fifo_base *base = (void *)parent;
	struct nv50_fifo_chan *chan;
	int ret;

	nv_ioctl(parent, "create channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
				 "offset %016llx\n", args->v0.version,
			 args->v0.pushbuf, args->v0.offset);
	} else
		return ret;

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
				       0x2000, args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR) |
				       (1ULL << NVDEV_ENGINE_MPEG) |
				       (1ULL << NVDEV_ENGINE_ME) |
				       (1ULL << NVDEV_ENGINE_VP) |
				       (1ULL << NVDEV_ENGINE_CIPHER) |
				       (1ULL << NVDEV_ENGINE_SEC) |
				       (1ULL << NVDEV_ENGINE_BSP) |
				       (1ULL << NVDEV_ENGINE_MSVLD) |
				       (1ULL << NVDEV_ENGINE_MSPDEC) |
				       (1ULL << NVDEV_ENGINE_MSPPP) |
				       (1ULL << NVDEV_ENGINE_CE0) |
				       (1ULL << NVDEV_ENGINE_VIC), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
			     &chan->ramht);
	if (ret)
		return ret;

	nv_parent(chan)->context_attach = g84_fifo_context_attach;
	nv_parent(chan)->context_detach = g84_fifo_context_detach;
	nv_parent(chan)->object_attach = g84_fifo_object_attach;
	nv_parent(chan)->object_detach = nv50_fifo_object_detach;

	nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
	nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
	nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
	nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
	nv_wo32(base->ramfc, 0x3c, 0x003f6078);
	nv_wo32(base->ramfc, 0x44, 0x01003fff);
	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
	nv_wo32(base->ramfc, 0x4c, 0xffffffff);
	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
	nv_wo32(base->ramfc, 0x78, 0x00000000);
	nv_wo32(base->ramfc, 0x7c, 0x30000001);
	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
				   (4 << 24) /* SEARCH_FULL */ |
				   (chan->ramht->gpuobj.node->offset >> 4));
	nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
	nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
	bar->flush(bar);
	return 0;
}

/* Create a GPFIFO (indirect) channel and fill in its RAMFC. */
static int
g84_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, void *data, u32 size,
		       struct nvkm_object **pobject)
{
	union {
		struct nv50_channel_gpfifo_v0 v0;
	} *args = data;
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct nv50_fifo_base *base = (void *)parent;
	struct nv50_fifo_chan *chan;
	u64 ioffset, ilength;
	int ret;

	nv_ioctl(parent, "create channel gpfifo size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
				 "ioffset %016llx ilength %08x\n",
			 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
			 args->v0.ilength);
	} else
		return ret;

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
				       0x2000, args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR) |
				       (1ULL << NVDEV_ENGINE_MPEG) |
				       (1ULL << NVDEV_ENGINE_ME) |
				       (1ULL << NVDEV_ENGINE_VP) |
				       (1ULL << NVDEV_ENGINE_CIPHER) |
				       (1ULL << NVDEV_ENGINE_SEC) |
				       (1ULL << NVDEV_ENGINE_BSP) |
				       (1ULL << NVDEV_ENGINE_MSVLD) |
				       (1ULL << NVDEV_ENGINE_MSPDEC) |
				       (1ULL << NVDEV_ENGINE_MSPPP) |
				       (1ULL << NVDEV_ENGINE_CE0) |
				       (1ULL << NVDEV_ENGINE_VIC), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
			     &chan->ramht);
	if (ret)
		return ret;

	nv_parent(chan)->context_attach = g84_fifo_context_attach;
	nv_parent(chan)->context_detach = g84_fifo_context_detach;
	nv_parent(chan)->object_attach = g84_fifo_object_attach;
	nv_parent(chan)->object_detach = nv50_fifo_object_detach;

	ioffset = args->v0.ioffset;
	ilength = order_base_2(args->v0.ilength / 8);

	nv_wo32(base->ramfc, 0x3c, 0x403f6078);
	nv_wo32(base->ramfc, 0x44, 0x01003fff);
	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
	nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
	nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
	nv_wo32(base->ramfc, 0x78, 0x00000000);
	nv_wo32(base->ramfc, 0x7c, 0x30000001);
	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
				   (4 << 24) /* SEARCH_FULL */ |
				   (chan->ramht->gpuobj.node->offset >> 4));
	nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
	nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
	bar->flush(bar);
	return 0;
}

/* Register the channel's RAMFC with PFIFO and rebuild the playlist. */
static int
g84_fifo_chan_init(struct nvkm_object *object)
{
	struct nv50_fifo_priv *priv = (void *)object->engine;
	struct nv50_fifo_base *base = (void *)object->parent;
	struct nv50_fifo_chan *chan = (void *)object;
	struct nvkm_gpuobj *ramfc = base->ramfc;
	u32 chid = chan->base.chid;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 8);
	nv50_fifo_playlist_update(priv);
	return 0;
}

static struct nvkm_ofuncs
g84_fifo_ofuncs_dma = {
	.ctor = g84_fifo_chan_ctor_dma,
	.dtor = nv50_fifo_chan_dtor,
	.init = g84_fifo_chan_init,
	.fini = nv50_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};

static struct nvkm_ofuncs
g84_fifo_ofuncs_ind = {
	.ctor = g84_fifo_chan_ctor_ind,
	.dtor = nv50_fifo_chan_dtor,
	.init = g84_fifo_chan_init,
	.fini = nv50_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};

static struct nvkm_oclass
g84_fifo_sclass[] = {
	{ G82_CHANNEL_DMA, &g84_fifo_ofuncs_dma },
	{ G82_CHANNEL_GPFIFO, &g84_fifo_ofuncs_ind },
	{}
};

/*******************************************************************************
 * FIFO context - basically just the instmem reserved for the channel
 ******************************************************************************/

static int
g84_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		      struct nvkm_oclass *oclass, void *data, u32 size,
		      struct nvkm_object **pobject)
{
	struct nv50_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
				       0x1000, NVOBJ_FLAG_HEAP, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	/* engine context table */
	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0200, 0,
			      NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
	if (ret)
		return ret;

	/* page directory */
	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0,
			      0, &base->pgd);
	if (ret)
		return ret;

	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
	if (ret)
		return ret;

	/* cache */
	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x1000,
			      0x400, NVOBJ_FLAG_ZERO_ALLOC, &base->cache);
	if (ret)
		return ret;

	/* RAMFC */
	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0100,
			      0x100, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
	if (ret)
		return ret;

	return 0;
}

static struct nvkm_oclass
g84_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x84),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = g84_fifo_context_ctor,
		.dtor = nv50_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

/* Enable the uevent interrupt. */
static void
g84_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	nv_mask(fifo, 0x002140, 0x40000000, 0x40000000);
}

/* Disable the uevent interrupt. */
static void
g84_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	nv_mask(fifo, 0x002140, 0x40000000, 0x00000000);
}

static const struct nvkm_event_func
g84_fifo_uevent_func = {
	.ctor = nvkm_fifo_uevent_ctor,
	.init = g84_fifo_uevent_init,
	.fini = g84_fifo_uevent_fini,
};

static int
g84_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	      struct nvkm_oclass *oclass, void *data, u32 size,
	      struct nvkm_object **pobject)
{
	struct nv50_fifo_priv *priv;
	int ret;

	ret = nvkm_fifo_create(parent, engine, oclass, 1, 127, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* two playlist buffers, swapped on each playlist update */
	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
			      &priv->playlist[0]);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
			      &priv->playlist[1]);
	if (ret)
		return ret;

	ret = nvkm_event_init(&g84_fifo_uevent_func, 1, 1, &priv->base.uevent);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00000100;
	nv_subdev(priv)->intr = nv04_fifo_intr;
	nv_engine(priv)->cclass = &g84_fifo_cclass;
	nv_engine(priv)->sclass = g84_fifo_sclass;
	priv->base.pause = nv04_fifo_pause;
	priv->base.start = nv04_fifo_start;
	return 0;
}

struct nvkm_oclass *
g84_fifo_oclass = &(struct nvkm_oclass) {
	.handle = NV_ENGINE(FIFO, 0x84),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = g84_fifo_ctor,
		.dtor = nv50_fifo_dtor,
		.init = nv50_fifo_init,
		.fini = _nvkm_fifo_fini,
	},
};