/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"
void
nouveau_dma_pre_init(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_bo *pushbuf = chan->pushbuf_bo;

	if (dev_priv->card_type >= NV_50) {
		/* NV50+ channels fetch commands through an indirect
		 * buffer (IB): the second half of the push buffer holds
		 * 8-byte IB entries, each pointing at a run of commands.
		 */
		const int ib_size = pushbuf->bo.mem.size / 2;

		/* dword index of the start of the IB */
		chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
		/* one IB entry is kept free so PUT never catches GET */
		chan->dma.ib_max = (ib_size / 8) - 1;
		chan->dma.ib_put = 0;
		chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;

		/* command space, in dwords, in the first half */
		chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2;
	} else {
		chan->dma.max  = (pushbuf->bo.mem.size >> 2) - 2;
	}

	chan->dma.put  = 0;
	chan->dma.cur  = chan->dma.put;
	chan->dma.free = chan->dma.max - chan->dma.cur;
}
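
/*
 * Worked example (illustrative numbers only): for a 64KiB push buffer
 * on NV50+, ib_size is 32KiB, so the IB occupies the second half of
 * the buffer.  That makes ib_base 0x2000 (a dword index), ib_max
 * 32768 / 8 - 1 = 4095 usable entries, and dma.max 0x2000 dwords of
 * command space in the first half.
 */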

int
nouveau_dma_init(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret, i;

	if (dev_priv->card_type >= NV_C0) {
		/* Fermi: create the M2MF object (class 0x9039) and bind
		 * it to the M2MF subchannel by writing its handle to
		 * method 0x0000.
		 */
		ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
		if (ret)
			return ret;

		ret = RING_SPACE(chan, 2);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1);
		OUT_RING  (chan, 0x00009039);
		FIRE_RING (chan);
		return 0;
	}

	/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
	ret = nouveau_gpuobj_gr_new(chan, NvM2MF, dev_priv->card_type < NV_50 ?
				    0x0039 : 0x5039);
	if (ret)
		return ret;

	/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000,
				     &chan->m2mf_ntfy);
	if (ret)
		return ret;

	/* Insert NOPs for NOUVEAU_DMA_SKIPS */
	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(chan, 0);

	/* Initialise NV_MEMORY_TO_MEMORY_FORMAT */
	ret = RING_SPACE(chan, 6);
	if (ret)
		return ret;
	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
	OUT_RING  (chan, NvM2MF);
	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 3);
	OUT_RING  (chan, NvNotify0);
	OUT_RING  (chan, chan->vram_handle);
	OUT_RING  (chan, chan->gart_handle);

	/* Sit back and pray the channel works... */
	FIRE_RING(chan);

	return 0;
}
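
/*
 * A minimal sketch of how the helpers initialised above are used by
 * callers elsewhere in the driver (the method offset 0x0100 is
 * illustrative, not a real method of any particular class):
 *
 *	ret = RING_SPACE(chan, 2);
 *	if (ret)
 *		return ret;
 *	BEGIN_RING(chan, NvSubM2MF, 0x0100, 1);
 *	OUT_RING  (chan, 0);
 *	FIRE_RING (chan);
 *
 * RING_SPACE() waits for room in the push buffer, BEGIN_RING() emits
 * the method header, OUT_RING() emits the data dword, and FIRE_RING()
 * hands the commands to the GPU.
 */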

void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&chan->pushbuf_bo->kmap, &is_iomem);

	/* the push buffer may be mapped through an I/O aperture (e.g.
	 * when it lives in VRAM), in which case the copy has to go
	 * through memcpy_toio()
	 */
	mem = &mem[chan->dma.cur];
	if (is_iomem)
		memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
	else
		memcpy(mem, data, nr_dwords * 4);
	chan->dma.cur += nr_dwords;
}
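
/*
 * OUT_RINGp is equivalent to emitting each dword with OUT_RING(), but
 * does the copy in one go.  A hypothetical caller, assuming space has
 * already been reserved and with an illustrative method offset:
 *
 *	static const u32 args[2] = { 0x00000000, 0x00000001 };
 *
 *	BEGIN_RING(chan, NvSubM2MF, 0x0104, ARRAY_SIZE(args));
 *	OUT_RINGp (chan, args, ARRAY_SIZE(args));
 */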

/* Fetch and adjust GPU GET pointer
 *
 * Returns:
 *  value >= 0, the adjusted GET pointer
 *  -EINVAL if GET pointer currently outside main push buffer
 *  -EBUSY if timeout exceeded
 */
static inline int
READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
{
	uint32_t val;

	val = nvchan_rd32(chan, chan->user_get);

	/* reset counter as long as GET is still advancing, this is
	 * to avoid misdetecting a GPU lockup if the GPU happens to
	 * just be processing an operation that takes a long time
	 */
	if (val != *prev_get) {
		*prev_get = val;
		*timeout = 0;
	}

	if ((++*timeout & 0xff) == 0) {
		DRM_UDELAY(1);
		if (*timeout > 100000)
			return -EBUSY;
	}

	if (val < chan->pushbuf_base ||
	    val > chan->pushbuf_base + (chan->dma.max << 2))
		return -EINVAL;

	return (val - chan->pushbuf_base) >> 2;
}

void
nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
	      int delta, int length)
{
	struct nouveau_bo *pb = chan->pushbuf_bo;
	uint64_t offset = bo->bo.offset + delta;
	/* each IB entry is two dwords */
	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;

	BUG_ON(chan->dma.ib_free < 1);
	nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
	nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);

	/* ib_max + 1 is a power of two, so the AND wraps PUT */
	chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;

	DRM_MEMORYBARRIER();
	/* Flush writes. */
	nouveau_bo_rd32(pb, 0);

	/* update the IB PUT register in the channel's USER area */
	nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
	chan->dma.ib_free--;
}
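
/*
 * IB entry encoding, as written above: the first dword holds the low
 * 32 bits of the command buffer's GPU address, the second packs the
 * upper address bits with the length shifted left by 8.  For example,
 * a push of length 0x40 at offset 0x123450000 is encoded as
 * 0x23450000 and 0x00004001.
 */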

static int
nv50_dma_push_wait(struct nouveau_channel *chan, int count)
{
	uint32_t cnt = 0, prev_get = 0;

	while (chan->dma.ib_free < count) {
		/* 0x88 is the IB GET register in the channel's USER area */
		uint32_t get = nvchan_rd32(chan, 0x88);
		if (get != prev_get) {
			prev_get = get;
			cnt = 0;
		}

		/* same lockup heuristic as READ_GET(): only give up once
		 * GET has stopped advancing for a long time
		 */
		if ((++cnt & 0xff) == 0) {
			DRM_UDELAY(1);
			if (cnt > 100000)
				return -EBUSY;
		}

		chan->dma.ib_free = get - chan->dma.ib_put;
		if (chan->dma.ib_free <= 0)
			chan->dma.ib_free += chan->dma.ib_max;
	}

	return 0;
}
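
/*
 * The free-entry computation above wraps modulo ib_max.  For example,
 * with ib_max = 4095, ib_put = 10 and get = 5, get - ib_put yields -5,
 * which wraps to 4090 free entries.
 */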

static int
nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
{
	uint32_t cnt = 0, prev_get = 0;
	int ret;

	/* make sure there are enough IB slots first, then enough dwords
	 * in the master push buffer itself
	 */
	ret = nv50_dma_push_wait(chan, slots + 1);
	if (unlikely(ret))
		return ret;

	while (chan->dma.free < count) {
		int get = READ_GET(chan, &prev_get, &cnt);
		if (unlikely(get < 0)) {
			/* -EINVAL means GET is outside the master ring,
			 * i.e. the GPU is fetching from an external push
			 * buffer; keep polling
			 */
			if (get == -EINVAL)
				continue;

			return get;
		}

		/* same wrap-around logic as nouveau_dma_wait() below,
		 * minus the SKIPS handling
		 */
		if (get <= chan->dma.cur) {
			chan->dma.free = chan->dma.max - chan->dma.cur;
			if (chan->dma.free >= count)
				break;

			FIRE_RING(chan);
			do {
				get = READ_GET(chan, &prev_get, &cnt);
				if (unlikely(get < 0)) {
					if (get == -EINVAL)
						continue;
					return get;
				}
			} while (get == 0);
			chan->dma.cur = 0;
			chan->dma.put = 0;
		}

		chan->dma.free = get - chan->dma.cur - 1;
	}

	return 0;
}
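
/*
 * Pre-NV50 channels execute the push buffer directly as one ring.  A
 * rough picture of the layout nouveau_dma_wait() below manages:
 *
 *	0 ...... SKIPS ........... GET ............ PUT ....... dma.max
 *	| NOPs  | already fetched | pending fetch  | free space |
 *
 * When PUT nears the end, the CPU emits a jump back to pushbuf_base
 * and resumes writing just after the SKIPS area.
 */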

int
nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
{
	uint32_t prev_get = 0, cnt = 0;
	int get;

	if (chan->dma.ib_max)
		return nv50_dma_wait(chan, slots, size);

	while (chan->dma.free < size) {
		get = READ_GET(chan, &prev_get, &cnt);
		if (unlikely(get == -EBUSY))
			return -EBUSY;

		/* loop until we have a usable GET pointer.  the value
		 * we read from the GPU may be outside the main ring if
		 * PFIFO is processing a buffer called from the main ring;
		 * discard these values until something sensible is seen.
		 *
		 * the other case where we discard GET is while the GPU is
		 * fetching from the SKIPS area, so the code below doesn't
		 * have to deal with some fun corner cases.
		 */
		if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
			continue;

		if (get <= chan->dma.cur) {
			/* engine is fetching behind us, or is completely
			 * idle (GET == PUT), so we have free space up until
			 * the end of the push buffer.
			 *
			 * we can only hit this path once per call: after
			 * looping back to the beginning of the push buffer
			 * we'll hit the fetching-ahead-of-us path from that
			 * point on.
			 *
			 * the *one* exception to that rule is if we read
			 * GET==PUT, in which case the below conditional will
			 * always succeed and break us out of the wait loop.
			 */
			chan->dma.free = chan->dma.max - chan->dma.cur;
			if (chan->dma.free >= size)
				break;

			/* not enough space left at the end of the push buffer,
			 * instruct the GPU to jump back to the start right
			 * after processing the currently pending commands.
			 */
			OUT_RING(chan, chan->pushbuf_base | 0x20000000);

			/* wait for GET to leave the SKIPS area; this avoids
			 * writing GET==PUT, a race that would make us think
			 * the GPU is idle when it's not.
			 */
			do {
				get = READ_GET(chan, &prev_get, &cnt);
				if (unlikely(get == -EBUSY))
					return -EBUSY;
				if (unlikely(get == -EINVAL))
					continue;
			} while (get <= NOUVEAU_DMA_SKIPS);
			WRITE_PUT(NOUVEAU_DMA_SKIPS);

			/* we're now submitting commands at the start of
			 * the push buffer.
			 */
			chan->dma.cur  =
			chan->dma.put  = NOUVEAU_DMA_SKIPS;
		}

		/* engine fetching ahead of us, we have space up until the
		 * current GET pointer.  the "- 1" is to ensure there's
		 * space left to emit a jump back to the beginning of the
		 * push buffer if we require it.  we can never get GET == PUT
		 * here, so this is safe.
		 */
		chan->dma.free = get - chan->dma.cur - 1;
	}

	return 0;
}