/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv31.h"

#include <core/client.h>
#include <core/handle.h>
#include <engine/fifo.h>
#include <subdev/instmem.h>
#include <subdev/fb.h>
#include <subdev/timer.h>

/*******************************************************************************
 * MPEG object classes
 ******************************************************************************/

static int
nv31_mpeg_object_ctor(struct nvkm_object *parent,
		      struct nvkm_object *engine,
		      struct nvkm_oclass *oclass, void *data, u32 size,
		      struct nvkm_object **pobject)
{
	struct nvkm_gpuobj *obj;
	int ret;

	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
				 20, 16, 0, &obj);
	*pobject = nv_object(obj);
	if (ret)
		return ret;

	nv_wo32(obj, 0x00, nv_mclass(obj));
	nv_wo32(obj, 0x04, 0x00000000);
	nv_wo32(obj, 0x08, 0x00000000);
	nv_wo32(obj, 0x0c, 0x00000000);
	return 0;
}

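/* Software method handler shared by the class's DMA_* methods.  The method
 * argument is an instance address (in 16-byte units) of a DMA object; the
 * object is read back from instance memory, rejected unless it describes a
 * linear mapping, and its base/size are then written into the engine's
 * DMA_CMD, DMA_DATA or DMA_IMAGE registers depending on the method.
 */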
static int
nv31_mpeg_mthd_dma(struct nvkm_object *object, u32 mthd, void *arg, u32 len)
{
	struct nv31_mpeg *mpeg = (void *)object->engine;
	struct nvkm_device *device = mpeg->base.engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	u32 inst = *(u32 *)arg << 4;
	u32 dma0 = nv_ro32(imem, inst + 0);
	u32 dma1 = nv_ro32(imem, inst + 4);
	u32 dma2 = nv_ro32(imem, inst + 8);
	u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
	u32 size = dma1 + 1;

	/* only allow linear DMA objects */
	if (!(dma0 & 0x00002000))
		return -EINVAL;

	if (mthd == 0x0190) {
		/* DMA_CMD */
		nvkm_mask(device, 0x00b300, 0x00010000, (dma0 & 0x00030000) ? 0x00010000 : 0);
		nvkm_wr32(device, 0x00b334, base);
		nvkm_wr32(device, 0x00b324, size);
	} else
	if (mthd == 0x01a0) {
		/* DMA_DATA */
		nvkm_mask(device, 0x00b300, 0x00020000, (dma0 & 0x00030000) ? 0x00020000 : 0);
		nvkm_wr32(device, 0x00b360, base);
		nvkm_wr32(device, 0x00b364, size);
	} else {
		/* DMA_IMAGE, VRAM only */
		if (dma0 & 0x00030000)
			return -EINVAL;

		nvkm_wr32(device, 0x00b370, base);
		nvkm_wr32(device, 0x00b374, size);
	}

	return 0;
}

struct nvkm_ofuncs
nv31_mpeg_ofuncs = {
	.ctor = nv31_mpeg_object_ctor,
	.dtor = _nvkm_gpuobj_dtor,
	.init = _nvkm_gpuobj_init,
	.fini = _nvkm_gpuobj_fini,
	.rd32 = _nvkm_gpuobj_rd32,
	.wr32 = _nvkm_gpuobj_wr32,
};

static struct nvkm_omthds
nv31_mpeg_omthds[] = {
	{ 0x0190, 0x0190, nv31_mpeg_mthd_dma },
	{ 0x01a0, 0x01a0, nv31_mpeg_mthd_dma },
	{ 0x01b0, 0x01b0, nv31_mpeg_mthd_dma },
	{}
};

struct nvkm_oclass
nv31_mpeg_sclass[] = {
	{ 0x3174, &nv31_mpeg_ofuncs, nv31_mpeg_omthds },
	{}
};

/*******************************************************************************
 * PMPEG context
 ******************************************************************************/

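/* PMPEG can only be bound to one channel at a time, so the context
 * constructor simply claims the engine under the engine lock and fails
 * with -EBUSY if another channel already owns it.
 */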
static int
nv31_mpeg_context_ctor(struct nvkm_object *parent,
		       struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, void *data, u32 size,
		       struct nvkm_object **pobject)
{
	struct nv31_mpeg *mpeg = (void *)engine;
	struct nv31_mpeg_chan *chan;
	unsigned long flags;
	int ret;

	ret = nvkm_object_create(parent, engine, oclass, 0, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	spin_lock_irqsave(&nv_engine(mpeg)->lock, flags);
	if (mpeg->chan) {
		spin_unlock_irqrestore(&nv_engine(mpeg)->lock, flags);
		nvkm_object_destroy(&chan->base);
		*pobject = NULL;
		return -EBUSY;
	}
	mpeg->chan = chan;
	spin_unlock_irqrestore(&nv_engine(mpeg)->lock, flags);
	return 0;
}

static void
nv31_mpeg_context_dtor(struct nvkm_object *object)
{
	struct nv31_mpeg *mpeg = (void *)object->engine;
	struct nv31_mpeg_chan *chan = (void *)object;
	unsigned long flags;

	spin_lock_irqsave(&nv_engine(mpeg)->lock, flags);
	mpeg->chan = NULL;
	spin_unlock_irqrestore(&nv_engine(mpeg)->lock, flags);
	nvkm_object_destroy(&chan->base);
}

struct nvkm_oclass
nv31_mpeg_cclass = {
	.handle = NV_ENGCTX(MPEG, 0x31),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv31_mpeg_context_ctor,
		.dtor = nv31_mpeg_context_dtor,
		.init = nvkm_object_init,
		.fini = nvkm_object_fini,
	},
};

/*******************************************************************************
 * PMPEG engine/subdev functions
 ******************************************************************************/

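/* Propagate one of PFB's tiling regions into PMPEG's private copy of the
 * tile registers (addr/limit/pitch at 0x00b000 + 0x10 * region).
 */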
void
nv31_mpeg_tile_prog(struct nvkm_engine *engine, int i)
{
	struct nv31_mpeg *mpeg = (void *)engine;
	struct nvkm_device *device = mpeg->base.engine.subdev.device;
	struct nvkm_fb_tile *tile = &device->fb->tile.region[i];

	nvkm_wr32(device, 0x00b008 + (i * 0x10), tile->pitch);
	nvkm_wr32(device, 0x00b004 + (i * 0x10), tile->limit);
	nvkm_wr32(device, 0x00b000 + (i * 0x10), tile->addr);
}

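/* Interrupt handler.  Method traps (status bit 0x01000000) are forwarded
 * to the bound object's software methods (nv31_mpeg_mthd_dma) where
 * possible; the interrupt is then acknowledged, and anything left
 * unhandled is logged against the owning channel.
 */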
void
nv31_mpeg_intr(struct nvkm_subdev *subdev)
{
	struct nv31_mpeg *mpeg = (void *)subdev;
	struct nvkm_device *device = mpeg->base.engine.subdev.device;
	struct nvkm_fifo *fifo = device->fifo;
	struct nvkm_handle *handle;
	struct nvkm_object *engctx;
	u32 stat = nvkm_rd32(device, 0x00b100);
	u32 type = nvkm_rd32(device, 0x00b230);
	u32 mthd = nvkm_rd32(device, 0x00b234);
	u32 data = nvkm_rd32(device, 0x00b238);
	u32 show = stat;
	unsigned long flags;

	spin_lock_irqsave(&nv_engine(mpeg)->lock, flags);
	engctx = nv_object(mpeg->chan);

	if (stat & 0x01000000) {
		/* happens on initial binding of the object */
		if (type == 0x00000020 && mthd == 0x0000) {
			nvkm_mask(device, 0x00b308, 0x00000000, 0x00000000);
			show &= ~0x01000000;
		}

		if (type == 0x00000010 && engctx) {
			handle = nvkm_handle_get_class(engctx, 0x3174);
			if (handle && !nv_call(handle->object, mthd, data))
				show &= ~0x01000000;
			nvkm_handle_put(handle);
		}
	}

	nvkm_wr32(device, 0x00b100, stat);
	nvkm_wr32(device, 0x00b230, 0x00000001);

	if (show) {
		nv_error(mpeg, "ch %d [%s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
			 fifo->chid(fifo, engctx),
			 nvkm_client_name(engctx), stat, type, mthd, data);
	}

	spin_unlock_irqrestore(&nv_engine(mpeg)->lock, flags);
}

static int
nv31_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nv31_mpeg *mpeg;
	int ret;

	ret = nvkm_mpeg_create(parent, engine, oclass, &mpeg);
	*pobject = nv_object(mpeg);
	if (ret)
		return ret;

	nv_subdev(mpeg)->unit = 0x00000002;
	nv_subdev(mpeg)->intr = nv31_mpeg_intr;
	nv_engine(mpeg)->cclass = &nv31_mpeg_cclass;
	nv_engine(mpeg)->sclass = nv31_mpeg_sclass;
	nv_engine(mpeg)->tile_prog = nv31_mpeg_tile_prog;
	return 0;
}

int
nv31_mpeg_init(struct nvkm_object *object)
{
	struct nvkm_engine *engine = nv_engine(object);
	struct nv31_mpeg *mpeg = (void *)object;
	struct nvkm_device *device = mpeg->base.engine.subdev.device;
	struct nvkm_fb *fb = device->fb;
	int ret, i;

	ret = nvkm_mpeg_init(&mpeg->base);
	if (ret)
		return ret;

	/* VPE init */
	nvkm_wr32(device, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
	nvkm_wr32(device, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */

	for (i = 0; i < fb->tile.regions; i++)
		engine->tile_prog(engine, i);

	/* PMPEG init */
	nvkm_wr32(device, 0x00b32c, 0x00000000);
	nvkm_wr32(device, 0x00b314, 0x00000100);
	nvkm_wr32(device, 0x00b220, 0x00000031);
	nvkm_wr32(device, 0x00b300, 0x02001ec1);
	nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000001);

	nvkm_wr32(device, 0x00b100, 0xffffffff);
	nvkm_wr32(device, 0x00b140, 0xffffffff);

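	/* wait up to 2ms for 0x00b200 bit 0 to clear before declaring the
	 * engine ready; bail out with -EBUSY on timeout
	 */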
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x00b200) & 0x00000001))
			break;
	) < 0) {
		nv_error(mpeg, "timeout 0x%08x\n", nvkm_rd32(device, 0x00b200));
		return -EBUSY;
	}

	return 0;
}

struct nvkm_oclass
nv31_mpeg_oclass = {
	.handle = NV_ENGINE(MPEG, 0x31),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv31_mpeg_ctor,
		.dtor = _nvkm_mpeg_dtor,
		.init = nv31_mpeg_init,
		.fini = _nvkm_mpeg_fini,
	},
};