1 /*
2  * Copyright 2012 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs
23  */
24 #include "nv04.h"
25 
26 #include <core/client.h>
27 #include <core/engctx.h>
28 #include <core/ramht.h>
29 #include <subdev/fb.h>
30 #include <subdev/instmem/nv04.h>
31 
32 #include <nvif/class.h>
33 #include <nvif/unpack.h>
34 
/* RAMFC layout for NV40-family chips: one entry per field of the
 * per-channel FIFO context that PFIFO saves/restores on channel switch.
 * Each row pairs a slice of a RAMFC word with the PFIFO register it
 * shadows; entry order and offsets must match the hardware layout.
 * NOTE(review): field meaning assumed to be { bits, reg-shift, ramfc
 * offset, ramfc-shift, register } per struct ramfc_desc in nv04.h —
 * confirm against that header. Terminated by an all-zero sentinel.
 */
static struct ramfc_desc
nv40_ramfc[] = {
	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
	{ 32,  0, 0x08,  0, NV10_PFIFO_CACHE1_REF_CNT },
	{ 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_DMA_STATE },
	{ 28,  0, 0x18,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
	{  2, 28, 0x18, 28, 0x002058 },
	{ 32,  0, 0x1c,  0, NV04_PFIFO_CACHE1_ENGINE },
	{ 32,  0, 0x20,  0, NV04_PFIFO_CACHE1_PULL1 },
	{ 32,  0, 0x24,  0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
	{ 32,  0, 0x28,  0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
	{ 32,  0, 0x2c,  0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
	{ 32,  0, 0x30,  0, NV10_PFIFO_CACHE1_SEMAPHORE },
	{ 32,  0, 0x34,  0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
	{ 32,  0, 0x38,  0, NV40_PFIFO_GRCTX_INSTANCE },
	{ 17,  0, 0x3c,  0, NV04_PFIFO_DMA_TIMESLICE },
	{ 32,  0, 0x40,  0, 0x0032e4 },
	{ 32,  0, 0x44,  0, 0x0032e8 },
	{ 32,  0, 0x4c,  0, 0x002088 },
	{ 32,  0, 0x50,  0, 0x003300 },
	{ 32,  0, 0x54,  0, 0x00330c },
	{}
};
61 
62 /*******************************************************************************
63  * FIFO channel objects
64  ******************************************************************************/
65 
66 static int
67 nv40_fifo_object_attach(struct nvkm_object *parent,
68 			struct nvkm_object *object, u32 handle)
69 {
70 	struct nv04_fifo *fifo = (void *)parent->engine;
71 	struct nv04_fifo_chan *chan = (void *)parent;
72 	u32 context, chid = chan->base.chid;
73 	int ret;
74 
75 	if (nv_iclass(object, NV_GPUOBJ_CLASS))
76 		context = nv_gpuobj(object)->addr >> 4;
77 	else
78 		context = 0x00000004; /* just non-zero */
79 
80 	switch (nv_engidx(object->engine)) {
81 	case NVDEV_ENGINE_DMAOBJ:
82 	case NVDEV_ENGINE_SW:
83 		context |= 0x00000000;
84 		break;
85 	case NVDEV_ENGINE_GR:
86 		context |= 0x00100000;
87 		break;
88 	case NVDEV_ENGINE_MPEG:
89 		context |= 0x00200000;
90 		break;
91 	default:
92 		return -EINVAL;
93 	}
94 
95 	context |= chid << 23;
96 
97 	mutex_lock(&nv_subdev(fifo)->mutex);
98 	ret = nvkm_ramht_insert(fifo->ramht, chid, handle, context);
99 	mutex_unlock(&nv_subdev(fifo)->mutex);
100 	return ret;
101 }
102 
/* Bind an engine context (GR or MPEG) to this channel.  The context's
 * instance address is written both to the live PFIFO register (if this
 * channel is currently resident in CACHE1) and to the channel's RAMFC
 * shadow copy, so it survives channel switches.  Returns 0 on success,
 * -EINVAL for engines that have no per-channel FIFO context slot.
 */
static int
nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx)
{
	struct nv04_fifo *fifo = (void *)parent->engine;
	struct nv04_fifo_chan *chan = (void *)parent;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags;
	u32 reg, ctx;

	/* reg = live PFIFO register, ctx = offset within RAMFC entry;
	 * both must correspond to the same field (cf. nv40_ramfc[]).
	 */
	switch (nv_engidx(engctx->engine)) {
	case NVDEV_ENGINE_SW:
		return 0; /* software "engine" has no hardware context */
	case NVDEV_ENGINE_GR:
		reg = 0x32e0;
		ctx = 0x38;
		break;
	case NVDEV_ENGINE_MPEG:
		reg = 0x330c;
		ctx = 0x54;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&fifo->base.lock, flags);
	nv_engctx(engctx)->addr = nv_gpuobj(engctx)->addr >> 4;
	/* Halt PFIFO (0x2500 bit 0) while touching context state. */
	nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);

	/* If this channel is the one currently loaded into CACHE1, update
	 * the live register too (0x3204 holds the active channel id).
	 */
	if ((nvkm_rd32(device, 0x003204) & fifo->base.max) == chan->base.chid)
		nvkm_wr32(device, reg, nv_engctx(engctx)->addr);
	nvkm_kmap(fifo->ramfc);
	nvkm_wo32(fifo->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr);
	nvkm_done(fifo->ramfc);

	/* Re-enable PFIFO. */
	nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return 0;
}
141 
/* Unbind an engine context from this channel: the inverse of
 * nv40_fifo_context_attach().  Clears the context instance address in
 * both the live PFIFO register (if the channel is resident) and the
 * RAMFC shadow.  The reg/ctx pairs must stay in sync with the attach
 * path.  Returns 0 on success, -EINVAL for unsupported engines.
 */
static int
nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend,
			 struct nvkm_object *engctx)
{
	struct nv04_fifo *fifo = (void *)parent->engine;
	struct nv04_fifo_chan *chan = (void *)parent;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags;
	u32 reg, ctx;

	switch (nv_engidx(engctx->engine)) {
	case NVDEV_ENGINE_SW:
		return 0; /* nothing to tear down for the software engine */
	case NVDEV_ENGINE_GR:
		reg = 0x32e0;
		ctx = 0x38;
		break;
	case NVDEV_ENGINE_MPEG:
		reg = 0x330c;
		ctx = 0x54;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&fifo->base.lock, flags);
	/* Halt PFIFO while modifying context state. */
	nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);

	/* Clear the live register only if this channel is in CACHE1. */
	if ((nvkm_rd32(device, 0x003204) & fifo->base.max) == chan->base.chid)
		nvkm_wr32(device, reg, 0x00000000);
	nvkm_kmap(fifo->ramfc);
	nvkm_wo32(fifo->ramfc, chan->ramfc + ctx, 0x00000000);
	nvkm_done(fifo->ramfc);

	/* Re-enable PFIFO. */
	nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return 0;
}
180 
/* Constructor for an NV40 DMA channel object.  Unpacks the userspace
 * ioctl arguments, creates the base FIFO channel, wires up the attach/
 * detach hooks, and initialises the channel's RAMFC entry.
 */
static int
nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv03_channel_dma_v0 v0;
	} *args = data;
	struct nv04_fifo *fifo = (void *)engine;
	struct nv04_fifo_chan *chan;
	int ret;

	nvif_ioctl(parent, "create channel dma size %d\n", size);
	/* NOTE(review): 'ret' looks uninitialized on the else path, but the
	 * nvif_unpack() macro assigns it internally — confirm against
	 * nvif/unpack.h before "fixing".
	 */
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
				   "offset %08x\n", args->v0.version,
			   args->v0.pushbuf, args->v0.offset);
	} else
		return ret;

	/* Channel in 0..0xc00000 range, 0x1000 alignment; allow objects on
	 * the DMAOBJ, SW, GR and MPEG engines.
	 */
	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
				       0x1000, args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR) |
				       (1ULL << NVDEV_ENGINE_MPEG), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->context_attach = nv40_fifo_context_attach;
	nv_parent(chan)->context_detach = nv40_fifo_context_detach;
	nv_parent(chan)->object_attach = nv40_fifo_object_attach;
	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
	/* Each NV40 RAMFC entry is 128 bytes wide. */
	chan->ramfc = chan->base.chid * 128;

	/* Initial RAMFC state: PUT/GET at the requested offset, push
	 * buffer instance, DMA fetch configuration, and timeslice.
	 */
	nvkm_kmap(fifo->ramfc);
	nvkm_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
	nvkm_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
	nvkm_wo32(fifo->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
	nvkm_wo32(fifo->ramfc, chan->ramfc + 0x18, 0x30000000 |
			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
			     NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	nvkm_wo32(fifo->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
	nvkm_done(fifo->ramfc);
	return 0;
}
234 
/* Object methods for NV40 DMA channels; only the constructor is
 * NV40-specific, everything else reuses the NV04/common handlers.
 */
static struct nvkm_ofuncs
nv40_fifo_ofuncs = {
	.ctor = nv40_fifo_chan_ctor,
	.dtor = nv04_fifo_chan_dtor,
	.init = nv04_fifo_chan_init,
	.fini = nv04_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};
246 
/* Channel classes userspace may create on this FIFO (NV40 DMA only). */
static struct nvkm_oclass
nv40_fifo_sclass[] = {
	{ NV40_CHANNEL_DMA, &nv40_fifo_ofuncs },
	{}
};
252 
253 /*******************************************************************************
254  * FIFO context - basically just the instmem reserved for the channel
255  ******************************************************************************/
256 
/* FIFO engine-context class; all handlers are shared NV04/common code. */
static struct nvkm_oclass
nv40_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x40),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_fifo_context_ctor,
		.dtor = _nvkm_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};
269 
270 /*******************************************************************************
271  * PFIFO engine
272  ******************************************************************************/
273 
/* Construct the NV40 PFIFO engine: create the base FIFO with channel
 * ids 0..31, take references on the instmem-provided RAMHT/RAMRO/RAMFC
 * objects, and hook up the NV40-specific classes and RAMFC layout.
 */
static int
nv40_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nv04_instmem *imem = nv04_instmem(parent);
	struct nv04_fifo *fifo;
	int ret;

	ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	/* Take references; released by the (shared) nv04_fifo_dtor. */
	nvkm_ramht_ref(imem->ramht, &fifo->ramht);
	nvkm_gpuobj_ref(imem->ramro, &fifo->ramro);
	nvkm_gpuobj_ref(imem->ramfc, &fifo->ramfc);

	nv_subdev(fifo)->unit = 0x00000100; /* PMC enable bit for PFIFO */
	nv_subdev(fifo)->intr = nv04_fifo_intr;
	nv_engine(fifo)->cclass = &nv40_fifo_cclass;
	nv_engine(fifo)->sclass = nv40_fifo_sclass;
	fifo->base.pause = nv04_fifo_pause;
	fifo->base.start = nv04_fifo_start;
	fifo->ramfc_desc = nv40_ramfc;
	return 0;
}
301 
/* Bring up the NV40 PFIFO: program RAMHT/RAMRO/RAMFC locations, apply
 * chipset-specific setup, then enable fetch, pull and caches.
 */
static int
nv40_fifo_init(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_fb *fb = device->fb;
	int ret;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

	nvkm_wr32(device, 0x002040, 0x000000ff);
	nvkm_wr32(device, 0x002044, 0x2101ffff);
	nvkm_wr32(device, 0x002058, 0x00000001);

	/* RAMHT base/size: bits field encodes table size relative to the
	 * 512-entry (9-bit) minimum; addresses are in 256-byte units.
	 */
	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
				       ((fifo->ramht->bits - 9) << 16) |
				        (fifo->ramht->gpuobj.addr >> 8));
	nvkm_wr32(device, NV03_PFIFO_RAMRO, fifo->ramro->addr >> 8);

	/* RAMFC location is chipset-dependent. */
	switch (nv_device(fifo)->chipset) {
	case 0x47:
	case 0x49:
	case 0x4b:
		nvkm_wr32(device, 0x002230, 0x00000001);
		/* fall through */
	case 0x40:
	case 0x41:
	case 0x42:
	case 0x43:
	case 0x45:
	case 0x48:
		nvkm_wr32(device, 0x002220, 0x00030002);
		break;
	default:
		/* Other chipsets place RAMFC in the last 512KiB of VRAM. */
		nvkm_wr32(device, 0x002230, 0x00000000);
		nvkm_wr32(device, 0x002220, ((fb->ram->size - 512 * 1024 +
					 fifo->ramfc->addr) >> 16) |
					0x00030000);
		break;
	}

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);

	/* Ack any stale interrupts, then enable all interrupt sources. */
	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
	return 0;
}
354 
/* Public engine class for the NV40 PFIFO; dtor and fini reuse the
 * NV04/common implementations.
 */
struct nvkm_oclass *
nv40_fifo_oclass = &(struct nvkm_oclass) {
	.handle = NV_ENGINE(FIFO, 0x40),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv40_fifo_ctor,
		.dtor = nv04_fifo_dtor,
		.init = nv40_fifo_init,
		.fini = _nvkm_fifo_fini,
	},
};
365