/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

/* Reset the software-side ring bookkeeping for a freshly created channel;
 * dma.max is the usable ring size in dwords (the buffer size in dwords,
 * minus two reserved dwords).
 */
void
nouveau_dma_pre_init(struct nouveau_channel *chan)
{
	chan->dma.max  = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
	chan->dma.put  = 0;
	chan->dma.cur  = chan->dma.put;
	chan->dma.free = chan->dma.max - chan->dma.cur;
}
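
/* Illustrative summary (editor's sketch, derived from the code above and
 * from nouveau_dma_wait() below; all values in dwords):
 *
 *	dma.put		ring position last handed to the GPU
 *	dma.cur		position of the next dword we will write
 *	dma.free	dwords we may write before waiting on the GPU
 *	dma.max		usable ring size, excluding the reserved tail
 */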

int
nouveau_dma_init(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *m2mf = NULL;
	struct nouveau_gpuobj *nvsw = NULL;
	int ret, i;

	/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
	ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ?
				    0x0039 : 0x5039, &m2mf);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_ref_add(dev, chan, NvM2MF, m2mf, NULL);
	if (ret)
		return ret;

	/* Create an NV_SW object for various sync purposes */
	ret = nouveau_gpuobj_sw_new(chan, NV_SW, &nvsw);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_ref_add(dev, chan, NvSw, nvsw, NULL);
	if (ret)
		return ret;

	/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
	if (ret)
		return ret;

	/* Map push buffer */
	ret = nouveau_bo_map(chan->pushbuf_bo);
	if (ret)
		return ret;

	/* Map the M2MF notifier object CPU-side; needed by fbcon (KMS only) */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = nouveau_bo_map(chan->notifier_bo);
		if (ret)
			return ret;
	}

	/* Pad the start of the push buffer with NOUVEAU_DMA_SKIPS NOPs */
	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(chan, 0);

	/* Initialise NV_MEMORY_TO_MEMORY_FORMAT */
	ret = RING_SPACE(chan, 4);
	if (ret)
		return ret;
	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
	OUT_RING(chan, NvM2MF);
	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
	OUT_RING(chan, NvNotify0);

	/* Initialise NV_SW */
	ret = RING_SPACE(chan, 2);
	if (ret)
		return ret;
	BEGIN_RING(chan, NvSubSw, 0, 1);
	OUT_RING(chan, NvSw);

	/* Sit back and pray the channel works... */
	FIRE_RING(chan);

	return 0;
}
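
/* A minimal caller-side sketch of the submission pattern used above:
 * reserve space, emit a method header and its data, then fire the ring.
 * Illustrative only; the method offset (0x0184) and the function name
 * are placeholders, not taken from this file.
 */
#if 0
static int
example_emit(struct nouveau_channel *chan)
{
	int ret;

	/* reserve two dwords; may block in nouveau_dma_wait() */
	ret = RING_SPACE(chan, 2);
	if (ret)
		return ret;

	/* one header dword (subchannel/method/count), one data dword */
	BEGIN_RING(chan, NvSubM2MF, 0x0184, 1);
	OUT_RING(chan, NvNotify0);

	/* bump PUT so PFIFO begins fetching the new commands */
	FIRE_RING(chan);
	return 0;
}
#endif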

/* Copy an array of dwords into the push buffer at the current write
 * position; the mapping may be I/O memory, hence the memcpy_toio() path.
 */
void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&chan->pushbuf_bo->kmap, &is_iomem);
	mem = &mem[chan->dma.cur];
	if (is_iomem)
		memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
	else
		memcpy(mem, data, nr_dwords * 4);
	chan->dma.cur += nr_dwords;
}

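/* A hedged usage sketch for OUT_RINGp(): emit one header followed by a
 * prebuilt block of dwords in a single copy, instead of repeated
 * OUT_RING() calls.  The method offset (0x0200), the data values and the
 * function name are placeholders for illustration.
 */
#if 0
static int
example_emit_block(struct nouveau_channel *chan)
{
	uint32_t block[] = { 0x1000, 0x2000, 0x3000, 0x4000 };
	int ret;

	/* one dword for the header, plus the payload */
	ret = RING_SPACE(chan, 1 + ARRAY_SIZE(block));
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, 0x0200, ARRAY_SIZE(block));
	OUT_RINGp(chan, block, ARRAY_SIZE(block));
	FIRE_RING(chan);
	return 0;
}
#endif
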
/* Fetch and adjust GPU GET pointer
 *
 * Returns:
 *  value >= 0, the adjusted GET pointer
 *  -EINVAL if GET pointer currently outside main push buffer
 *  -EBUSY if timeout exceeded
 */
static inline int
READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
{
	uint32_t val;

	val = nvchan_rd32(chan, chan->user_get);

	/* reset counter as long as GET is still advancing, this is
	 * to avoid misdetecting a GPU lockup if the GPU happens to
	 * just be processing an operation that takes a long time
	 */
	if (val != *prev_get) {
		*prev_get = val;
		*timeout = 0;
	}

	if ((++*timeout & 0xff) == 0) {
		DRM_UDELAY(1);
		if (*timeout > 100000)
			return -EBUSY;
	}

	if (val < chan->pushbuf_base ||
	    val > chan->pushbuf_base + (chan->dma.max << 2))
		return -EINVAL;

	return (val - chan->pushbuf_base) >> 2;
}

int
nouveau_dma_wait(struct nouveau_channel *chan, int size)
{
	uint32_t prev_get = 0, cnt = 0;
	int get;

	while (chan->dma.free < size) {
		get = READ_GET(chan, &prev_get, &cnt);
		if (unlikely(get == -EBUSY))
			return -EBUSY;

		/* loop until we have a usable GET pointer.  the value
		 * we read from the GPU may be outside the main ring if
		 * PFIFO is processing a buffer called from the main ring;
		 * discard these values until something sensible is seen.
		 *
		 * the other case where we discard GET is while the GPU is
		 * fetching from the SKIPS area, so the code below doesn't
		 * have to deal with some fun corner cases.
		 */
		if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
			continue;

		if (get <= chan->dma.cur) {
			/* engine is fetching behind us, or is completely
			 * idle (GET == PUT) so we have free space up until
			 * the end of the push buffer
			 *
			 * we can only hit this path once per call: after
			 * looping back to the beginning of the push buffer,
			 * we'll hit the fetching-ahead-of-us path from that
			 * point on.
			 *
			 * the *one* exception to that rule is if we read
			 * GET==PUT, in which case the below conditional will
			 * always succeed and break us out of the wait loop.
			 */
			chan->dma.free = chan->dma.max - chan->dma.cur;
			if (chan->dma.free >= size)
				break;

			/* not enough space left at the end of the push
			 * buffer, so instruct the GPU to jump back to the
			 * start right after processing the currently
			 * pending commands.
			 */
			OUT_RING(chan, chan->pushbuf_base | 0x20000000);

			/* wait for GET to leave the SKIPS area before
			 * updating PUT; this prevents writing GET==PUT and
			 * triggering a race that makes us think the GPU is
			 * idle when it's not.
			 */
			do {
				get = READ_GET(chan, &prev_get, &cnt);
				if (unlikely(get == -EBUSY))
					return -EBUSY;
				if (unlikely(get == -EINVAL))
					continue;
			} while (get <= NOUVEAU_DMA_SKIPS);
			WRITE_PUT(NOUVEAU_DMA_SKIPS);

			/* we're now submitting commands at the start of
			 * the push buffer.
			 */
			chan->dma.cur  =
			chan->dma.put  = NOUVEAU_DMA_SKIPS;
		}

		/* the engine is fetching ahead of us, so we have space up
		 * to the current GET pointer.  the "- 1" ensures there's
		 * room left to emit a jump back to the beginning of the
		 * push buffer if we need it.  we can never see GET == PUT
		 * here, so this is safe.
		 */
		chan->dma.free = get - chan->dma.cur - 1;
	}

	return 0;
}
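
/* For reference, nouveau_dma_wait() is normally reached through the
 * RING_SPACE() helper from nouveau_dma.h; a sketch of how that caller
 * side is expected to look (the real definition may differ):
 */
#if 0
static inline int
RING_SPACE(struct nouveau_channel *chan, int size)
{
	/* wait only when the cached free count is too small */
	if (chan->dma.free < size) {
		int ret = nouveau_dma_wait(chan, size);
		if (ret)
			return ret;
	}

	/* reserve the dwords; OUT_RING() advances dma.cur through them */
	chan->dma.free -= size;
	return 0;
}
#endif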