/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv31.h"

#include <core/client.h>
#include <core/handle.h>
#include <engine/fifo.h>
#include <subdev/instmem.h>
#include <subdev/fb.h>
#include <subdev/timer.h>

/*******************************************************************************
 * MPEG object classes
 ******************************************************************************/

static int
nv31_mpeg_object_ctor(struct nvkm_object *parent,
		      struct nvkm_object *engine,
		      struct nvkm_oclass *oclass, void *data, u32 size,
		      struct nvkm_object **pobject)
{
	struct nvkm_gpuobj *obj;
	int ret;

	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
				 20, 16, 0, &obj);
	*pobject = nv_object(obj);
	if (ret)
		return ret;

	nvkm_kmap(obj);
	nvkm_wo32(obj, 0x00, nv_mclass(obj));
	nvkm_wo32(obj, 0x04, 0x00000000);
	nvkm_wo32(obj, 0x08, 0x00000000);
	nvkm_wo32(obj, 0x0c, 0x00000000);
	nvkm_done(obj);
	return 0;
}

static int
nv31_mpeg_mthd_dma(struct nvkm_object *object, u32 mthd, void *arg, u32 len)
{
	struct nv31_mpeg *mpeg = (void *)object->engine;
	struct nvkm_device *device = mpeg->base.engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	u32 inst = *(u32 *)arg << 4;
	u32 dma0 = imem->func->rd32(imem, inst + 0);
	u32 dma1 = imem->func->rd32(imem, inst + 4);
	u32 dma2 = imem->func->rd32(imem, inst + 8);
	u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
	u32 size = dma1 + 1;

	/* only allow linear DMA objects */
	if (!(dma0 & 0x00002000))
		return -EINVAL;

	if (mthd == 0x0190) {
		/* DMA_CMD */
		nvkm_mask(device, 0x00b300, 0x00010000,
			  (dma0 & 0x00030000) ? 0x00010000 : 0);
		nvkm_wr32(device, 0x00b334, base);
		nvkm_wr32(device, 0x00b324, size);
	} else
	if (mthd == 0x01a0) {
		/* DMA_DATA */
		nvkm_mask(device, 0x00b300, 0x00020000,
			  (dma0 & 0x00030000) ? 0x00020000 : 0);
		nvkm_wr32(device, 0x00b360, base);
		nvkm_wr32(device, 0x00b364, size);
	} else {
		/* DMA_IMAGE, VRAM only */
		if (dma0 & 0x00030000)
			return -EINVAL;

		nvkm_wr32(device, 0x00b370, base);
		nvkm_wr32(device, 0x00b374, size);
	}

	return 0;
}

struct nvkm_ofuncs
nv31_mpeg_ofuncs = {
	.ctor = nv31_mpeg_object_ctor,
	.dtor = _nvkm_gpuobj_dtor,
	.init = _nvkm_gpuobj_init,
	.fini = _nvkm_gpuobj_fini,
	.rd32 = _nvkm_gpuobj_rd32,
	.wr32 = _nvkm_gpuobj_wr32,
};

static struct nvkm_omthds
nv31_mpeg_omthds[] = {
	{ 0x0190, 0x0190, nv31_mpeg_mthd_dma },
	{ 0x01a0, 0x01a0, nv31_mpeg_mthd_dma },
	{ 0x01b0, 0x01b0, nv31_mpeg_mthd_dma },
	{}
};

struct nvkm_oclass
nv31_mpeg_sclass[] = {
	{ 0x3174, &nv31_mpeg_ofuncs, nv31_mpeg_omthds },
	{}
};

/*******************************************************************************
 * PMPEG context
 ******************************************************************************/

static int
nv31_mpeg_context_ctor(struct nvkm_object *parent,
		       struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, void *data, u32 size,
		       struct nvkm_object **pobject)
{
	struct nv31_mpeg *mpeg = (void *)engine;
	struct nv31_mpeg_chan *chan;
	unsigned long flags;
	int ret;

	ret = nvkm_object_create(parent, engine, oclass, 0, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	spin_lock_irqsave(&nv_engine(mpeg)->lock, flags);
	if (mpeg->chan) {
		spin_unlock_irqrestore(&nv_engine(mpeg)->lock, flags);
		nvkm_object_destroy(&chan->base);
		*pobject = NULL;
		return -EBUSY;
	}
	mpeg->chan = chan;
	spin_unlock_irqrestore(&nv_engine(mpeg)->lock, flags);
	return 0;
}

static void
nv31_mpeg_context_dtor(struct nvkm_object *object)
{
	struct nv31_mpeg *mpeg = (void *)object->engine;
	struct nv31_mpeg_chan *chan = (void *)object;
	unsigned long flags;

	spin_lock_irqsave(&nv_engine(mpeg)->lock, flags);
	mpeg->chan = NULL;
	spin_unlock_irqrestore(&nv_engine(mpeg)->lock, flags);
	nvkm_object_destroy(&chan->base);
}

struct nvkm_oclass
nv31_mpeg_cclass = {
	.handle = NV_ENGCTX(MPEG, 0x31),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv31_mpeg_context_ctor,
		.dtor = nv31_mpeg_context_dtor,
		.init = _nvkm_object_init,
		.fini = _nvkm_object_fini,
	},
};

/*******************************************************************************
 * PMPEG engine/subdev functions
 ******************************************************************************/

void
nv31_mpeg_tile_prog(struct nvkm_engine *engine, int i)
{
	struct nv31_mpeg *mpeg = (void *)engine;
	struct nvkm_device *device = mpeg->base.engine.subdev.device;
	struct nvkm_fb_tile *tile = &device->fb->tile.region[i];

	nvkm_wr32(device, 0x00b008 + (i * 0x10), tile->pitch);
	nvkm_wr32(device, 0x00b004 + (i * 0x10), tile->limit);
	nvkm_wr32(device, 0x00b000 + (i * 0x10), tile->addr);
}

void
nv31_mpeg_intr(struct nvkm_subdev *subdev)
{
	struct nv31_mpeg *mpeg = (void *)subdev;
	struct nvkm_device *device = mpeg->base.engine.subdev.device;
	struct nvkm_fifo *fifo = device->fifo;
	struct nvkm_handle *handle;
	struct nvkm_object *engctx;
	u32 stat = nvkm_rd32(device, 0x00b100);
	u32 type = nvkm_rd32(device, 0x00b230);
	u32 mthd = nvkm_rd32(device, 0x00b234);
	u32 data = nvkm_rd32(device, 0x00b238);
	u32 show = stat;
	unsigned long flags;

	spin_lock_irqsave(&nv_engine(mpeg)->lock, flags);
	engctx = nv_object(mpeg->chan);

	if (stat & 0x01000000) {
		/* happens on initial binding of the object */
		if (type == 0x00000020 && mthd == 0x0000) {
			nvkm_mask(device, 0x00b308, 0x00000000, 0x00000000);
			show &= ~0x01000000;
		}

		if (type == 0x00000010 && engctx) {
			handle = nvkm_handle_get_class(engctx, 0x3174);
			if (handle && !nv_call(handle->object, mthd, data))
				show &= ~0x01000000;
			nvkm_handle_put(handle);
		}
	}

	nvkm_wr32(device, 0x00b100, stat);
	nvkm_wr32(device, 0x00b230, 0x00000001);

	if (show) {
		nvkm_error(subdev, "ch %d [%s] %08x %08x %08x %08x\n",
			   fifo->chid(fifo, engctx),
			   nvkm_client_name(engctx), stat, type, mthd, data);
	}

	spin_unlock_irqrestore(&nv_engine(mpeg)->lock, flags);
}

static int
nv31_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nv31_mpeg *mpeg;
	int ret;

	ret = nvkm_mpeg_create(parent, engine, oclass, &mpeg);
	*pobject = nv_object(mpeg);
	if (ret)
		return ret;

	nv_subdev(mpeg)->unit = 0x00000002;
	nv_subdev(mpeg)->intr = nv31_mpeg_intr;
	nv_engine(mpeg)->cclass = &nv31_mpeg_cclass;
	nv_engine(mpeg)->sclass = nv31_mpeg_sclass;
	nv_engine(mpeg)->tile_prog = nv31_mpeg_tile_prog;
	return 0;
}

int
nv31_mpeg_init(struct nvkm_object *object)
{
	struct nvkm_engine *engine = nv_engine(object);
	struct nv31_mpeg *mpeg = (void *)object;
	struct nvkm_subdev *subdev = &mpeg->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_fb *fb = device->fb;
	int ret, i;

	ret = nvkm_mpeg_init(&mpeg->base);
	if (ret)
		return ret;

	/* VPE init */
	nvkm_wr32(device, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
	nvkm_wr32(device, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */

	for (i = 0; i < fb->tile.regions; i++)
		engine->tile_prog(engine, i);

	/* PMPEG init */
	nvkm_wr32(device, 0x00b32c, 0x00000000);
	nvkm_wr32(device, 0x00b314, 0x00000100);
	nvkm_wr32(device, 0x00b220, 0x00000031);
	nvkm_wr32(device, 0x00b300, 0x02001ec1);
	nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000001);

	nvkm_wr32(device, 0x00b100, 0xffffffff);
	nvkm_wr32(device, 0x00b140, 0xffffffff);

	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x00b200) & 0x00000001))
			break;
	) < 0) {
		nvkm_error(subdev, "timeout %08x\n",
			   nvkm_rd32(device, 0x00b200));
		return -EBUSY;
	}

	return 0;
}

struct nvkm_oclass
nv31_mpeg_oclass = {
	.handle = NV_ENGINE(MPEG, 0x31),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv31_mpeg_ctor,
		.dtor = _nvkm_mpeg_dtor,
		.init = nv31_mpeg_init,
		.fini = _nvkm_mpeg_fini,
	},
};