1 /*
2  * Copyright 2012 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs
23  */
24 #include "nv31.h"
25 
26 #include <core/client.h>
27 #include <subdev/fb.h>
28 #include <subdev/timer.h>
29 #include <engine/fifo.h>
30 
31 #include <nvif/class.h>
32 
33 /*******************************************************************************
34  * MPEG object classes
35  ******************************************************************************/
36 
37 static int
38 nv31_mpeg_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
39 		      int align, struct nvkm_gpuobj **pgpuobj)
40 {
41 	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16, align,
42 				  false, parent, pgpuobj);
43 	if (ret == 0) {
44 		nvkm_kmap(*pgpuobj);
45 		nvkm_wo32(*pgpuobj, 0x00, object->oclass_name);
46 		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
47 		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
48 		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
49 		nvkm_done(*pgpuobj);
50 	}
51 	return ret;
52 }
53 
/* Object functions for NV31_MPEG class objects; only instmem binding
 * requires special handling.
 */
const struct nvkm_object_func
nv31_mpeg_object = {
	.bind = nv31_mpeg_object_bind,
};
58 
59 /*******************************************************************************
60  * PMPEG context
61  ******************************************************************************/
62 
63 static void *
64 nv31_mpeg_chan_dtor(struct nvkm_object *object)
65 {
66 	struct nv31_mpeg_chan *chan = nv31_mpeg_chan(object);
67 	struct nv31_mpeg *mpeg = chan->mpeg;
68 	unsigned long flags;
69 
70 	spin_lock_irqsave(&mpeg->base.engine.lock, flags);
71 	if (mpeg->chan == chan)
72 		mpeg->chan = NULL;
73 	spin_unlock_irqrestore(&mpeg->base.engine.lock, flags);
74 	return chan;
75 }
76 
/* Channel object functions; only destruction needs special handling. */
static const struct nvkm_object_func
nv31_mpeg_chan = {
	.dtor = nv31_mpeg_chan_dtor,
};
81 
82 int
83 nv31_mpeg_chan_new(struct nvkm_fifo_chan *fifoch,
84 		   const struct nvkm_oclass *oclass,
85 		   struct nvkm_object **pobject)
86 {
87 	struct nv31_mpeg *mpeg = nv31_mpeg(oclass->engine);
88 	struct nv31_mpeg_chan *chan;
89 	unsigned long flags;
90 	int ret = -EBUSY;
91 
92 	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
93 		return -ENOMEM;
94 	nvkm_object_ctor(&nv31_mpeg_chan, oclass, &chan->object);
95 	chan->mpeg = mpeg;
96 	chan->fifo = fifoch;
97 	*pobject = &chan->object;
98 
99 	spin_lock_irqsave(&mpeg->base.engine.lock, flags);
100 	if (!mpeg->chan) {
101 		mpeg->chan = chan;
102 		ret = 0;
103 	}
104 	spin_unlock_irqrestore(&mpeg->base.engine.lock, flags);
105 	return ret;
106 }
107 
108 /*******************************************************************************
109  * PMPEG engine/subdev functions
110  ******************************************************************************/
111 
112 void
113 nv31_mpeg_tile_prog(struct nvkm_engine *engine, int i)
114 {
115 	struct nv31_mpeg *mpeg = (void *)engine;
116 	struct nvkm_device *device = mpeg->base.engine.subdev.device;
117 	struct nvkm_fb_tile *tile = &device->fb->tile.region[i];
118 
119 	nvkm_wr32(device, 0x00b008 + (i * 0x10), tile->pitch);
120 	nvkm_wr32(device, 0x00b004 + (i * 0x10), tile->limit);
121 	nvkm_wr32(device, 0x00b000 + (i * 0x10), tile->addr);
122 }
123 
/* Validate and program one of PMPEG's DMA objects (CMD, DATA or IMAGE)
 * from its in-memory representation.
 *
 * @data is the object's instance address in 16-byte units (hence the
 * << 4).  NOTE(review): 0x700000 appears to be an aperture onto
 * instance memory — confirm against the memory map for this family.
 *
 * Returns false when the object is unsuitable (non-linear, or not in
 * VRAM for DMA_IMAGE), in which case the caller leaves the interrupt
 * visible as an error.
 */
static bool
nv31_mpeg_mthd_dma(struct nvkm_device *device, u32 mthd, u32 data)
{
	u32 inst = data << 4;
	/* First three words of the DMA object: flags/target, limit, base. */
	u32 dma0 = nvkm_rd32(device, 0x700000 + inst);
	u32 dma1 = nvkm_rd32(device, 0x700004 + inst);
	u32 dma2 = nvkm_rd32(device, 0x700008 + inst);
	u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
	u32 size = dma1 + 1;

	/* only allow linear DMA objects */
	if (!(dma0 & 0x00002000))
		return false;

	if (mthd == 0x0190) {
		/* DMA_CMD */
		nvkm_mask(device, 0x00b300, 0x00010000,
				  (dma0 & 0x00030000) ? 0x00010000 : 0);
		nvkm_wr32(device, 0x00b334, base);
		nvkm_wr32(device, 0x00b324, size);
	} else
	if (mthd == 0x01a0) {
		/* DMA_DATA */
		nvkm_mask(device, 0x00b300, 0x00020000,
				  (dma0 & 0x00030000) ? 0x00020000 : 0);
		nvkm_wr32(device, 0x00b360, base);
		nvkm_wr32(device, 0x00b364, size);
	} else {
		/* DMA_IMAGE, VRAM only */
		if (dma0 & 0x00030000)
			return false;

		nvkm_wr32(device, 0x00b370, base);
		nvkm_wr32(device, 0x00b374, size);
	}

	return true;
}
162 
163 static bool
164 nv31_mpeg_mthd(struct nv31_mpeg *mpeg, u32 mthd, u32 data)
165 {
166 	struct nvkm_device *device = mpeg->base.engine.subdev.device;
167 	switch (mthd) {
168 	case 0x190:
169 	case 0x1a0:
170 	case 0x1b0:
171 		return mpeg->mthd_dma(device, mthd, data);
172 	default:
173 		break;
174 	}
175 	return false;
176 }
177 
/* PMPEG interrupt handler.  Reads the pending status and trapped
 * method/data, services software methods, acknowledges the interrupt,
 * and logs anything left unhandled against the active channel.
 */
void
nv31_mpeg_intr(struct nvkm_subdev *subdev)
{
	struct nv31_mpeg *mpeg = (void *)subdev;
	struct nvkm_device *device = mpeg->base.engine.subdev.device;
	u32 stat = nvkm_rd32(device, 0x00b100);	/* interrupt status */
	u32 type = nvkm_rd32(device, 0x00b230);	/* trap type */
	u32 mthd = nvkm_rd32(device, 0x00b234);	/* trapped method */
	u32 data = nvkm_rd32(device, 0x00b238);	/* trapped data */
	u32 show = stat;	/* bits still unhandled, reported below */
	unsigned long flags;

	/* Lock held across handling so mpeg->chan can't be torn down
	 * underneath us while we report against it.
	 */
	spin_lock_irqsave(&mpeg->base.engine.lock, flags);

	if (stat & 0x01000000) {
		/* happens on initial binding of the object */
		if (type == 0x00000020 && mthd == 0x0000) {
			/* no-op read-modify-write; acknowledges the bind */
			nvkm_mask(device, 0x00b308, 0x00000000, 0x00000000);
			show &= ~0x01000000;
		}

		if (type == 0x00000010) {
			/* software method — suppress only if handled;
			 * note the inverted sense vs later chips
			 */
			if (!nv31_mpeg_mthd(mpeg, mthd, data))
				show &= ~0x01000000;
		}
	}

	/* Acknowledge status, then kick the method processor. */
	nvkm_wr32(device, 0x00b100, stat);
	nvkm_wr32(device, 0x00b230, 0x00000001);

	if (show) {
		nvkm_error(subdev, "ch %d [%s] %08x %08x %08x %08x\n",
			   mpeg->chan ? mpeg->chan->fifo->chid : -1,
			   mpeg->chan ? mpeg->chan->object.client->name :
			   "unknown", stat, type, mthd, data);
	}

	spin_unlock_irqrestore(&mpeg->base.engine.lock, flags);
}
217 
/* Engine functions: one channel context class plus the NV31_MPEG user
 * object class exposed to clients.
 */
static const struct nvkm_engine_func
nv31_mpeg = {
	.fifo.cclass = nv31_mpeg_chan_new,
	.sclass = {
		{ -1, -1, NV31_MPEG, &nv31_mpeg_object },
		{}
	}
};
226 
/* Constructor (legacy nvkm object API): allocate the engine, wire up
 * the interrupt handler, unit mask and per-chipset hooks.
 */
static int
nv31_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nv31_mpeg *mpeg;
	int ret;

	ret = nvkm_mpeg_create(parent, engine, oclass, &mpeg);
	/* *pobject is set before the error check so the caller can
	 * destroy a partially-constructed object — nvkm convention.
	 */
	*pobject = nv_object(mpeg);
	if (ret)
		return ret;

	mpeg->base.engine.func = &nv31_mpeg;

	mpeg->mthd_dma = nv31_mpeg_mthd_dma;
	/* PMPEG is unit bit 1 in PMC_INTR/PMC_ENABLE */
	nv_subdev(mpeg)->unit = 0x00000002;
	nv_subdev(mpeg)->intr = nv31_mpeg_intr;
	nv_engine(mpeg)->tile_prog = nv31_mpeg_tile_prog;
	return 0;
}
248 
/* Bring the engine out of reset: base init, VPE setup, tile regions,
 * PMPEG core/interrupt configuration, then wait for the engine to
 * report idle.
 */
int
nv31_mpeg_init(struct nvkm_object *object)
{
	struct nvkm_engine *engine = nv_engine(object);
	struct nv31_mpeg *mpeg = (void *)object;
	struct nvkm_subdev *subdev = &mpeg->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_fb *fb = device->fb;
	int ret, i;

	ret = nvkm_mpeg_init(&mpeg->base);
	if (ret)
		return ret;

	/* VPE init */
	nvkm_wr32(device, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
	nvkm_wr32(device, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */

	/* replay the framebuffer's tiling configuration into PMPEG */
	for (i = 0; i < fb->tile.regions; i++)
		engine->tile_prog(engine, i);

	/* PMPEG init: values taken from the binary driver's sequence */
	nvkm_wr32(device, 0x00b32c, 0x00000000);
	nvkm_wr32(device, 0x00b314, 0x00000100);
	nvkm_wr32(device, 0x00b220, 0x00000031);
	nvkm_wr32(device, 0x00b300, 0x02001ec1);
	nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000001);

	/* clear pending interrupts, enable all interrupt sources */
	nvkm_wr32(device, 0x00b100, 0xffffffff);
	nvkm_wr32(device, 0x00b140, 0xffffffff);

	/* wait up to 2ms for 0x00b200 bit 0 to clear (engine idle) */
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x00b200) & 0x00000001))
			break;
	) < 0) {
		nvkm_error(subdev, "timeout %08x\n",
			   nvkm_rd32(device, 0x00b200));
		return -EBUSY;
	}

	return 0;
}
291 
/* Legacy oclass glue registering the NV31 PMPEG engine with the core. */
struct nvkm_oclass
nv31_mpeg_oclass = {
	.handle = NV_ENGINE(MPEG, 0x31),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv31_mpeg_ctor,
		.dtor = _nvkm_mpeg_dtor,
		.init = nv31_mpeg_init,
		.fini = _nvkm_mpeg_fini,
	},
};
302