/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"

void
nouveau_dma_pre_init(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_bo *pushbuf = chan->pushbuf_bo;

	if (dev_priv->card_type >= NV_50) {
		const int ib_size = pushbuf->bo.mem.size / 2;

		chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
		chan->dma.ib_max = (ib_size / 8) - 1;
		chan->dma.ib_put = 0;
		chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;

		chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2;
	} else {
		chan->dma.max = (pushbuf->bo.mem.size >> 2) - 2;
	}

	chan->dma.put = 0;
	chan->dma.cur = chan->dma.put;
	chan->dma.free = chan->dma.max - chan->dma.cur;
}
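
/* A layout note, inferred from the arithmetic above (a sketch, not an
 * authoritative hardware description): on NV50+ the push buffer is
 * split in half.  The lower half holds the command words themselves,
 * the upper half holds the indirect buffer (IB), an array of 8-byte
 * entries pointing back at segments of commands:
 *
 *	0 ............ size/2 .................. size
 *	| command dwords | IB entries, 2 dwords each |
 *
 * dma.max counts dwords available for commands, dma.ib_max counts
 * usable IB slots; one slot is left unused so a full ring can be
 * distinguished from an empty one.
 */
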
int
nouveau_dma_init(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret, i;

	if (dev_priv->card_type >= NV_C0) {
		ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
		if (ret)
			return ret;

		ret = RING_SPACE(chan, 2);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1);
		OUT_RING (chan, 0x00009039);
		FIRE_RING (chan);
		return 0;
	}

	/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
	ret = nouveau_gpuobj_gr_new(chan, NvM2MF, dev_priv->card_type < NV_50 ?
				    0x0039 : 0x5039);
	if (ret)
		return ret;

	/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
				     &chan->m2mf_ntfy);
	if (ret)
		return ret;

	/* Insert NOPs for NOUVEAU_DMA_SKIPS */
	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(chan, 0);

	/* Initialise NV_MEMORY_TO_MEMORY_FORMAT */
	ret = RING_SPACE(chan, 6);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
	OUT_RING (chan, NvM2MF);
	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 3);
	OUT_RING (chan, NvNotify0);
	OUT_RING (chan, chan->vram_handle);
	OUT_RING (chan, chan->gart_handle);

	/* Sit back and pray the channel works.. */
	FIRE_RING(chan);

	return 0;
}

void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&chan->pushbuf_bo->kmap, &is_iomem);
	mem = &mem[chan->dma.cur];
	if (is_iomem)
		memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
	else
		memcpy(mem, data, nr_dwords * 4);
	chan->dma.cur += nr_dwords;
}

/* Fetch and adjust GPU GET pointer
 *
 * Returns:
 *  value >= 0, the adjusted GET pointer
 *  -EINVAL if GET pointer currently outside main push buffer
 *  -EBUSY if timeout exceeded
 */
static inline int
READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
{
	uint64_t val;

	val = nvchan_rd32(chan, chan->user_get);
	if (chan->user_get_hi)
		val |= (uint64_t)nvchan_rd32(chan, chan->user_get_hi) << 32;

	/* reset the counter as long as GET is still advancing; this
	 * avoids misdetecting a GPU lockup when the GPU is merely
	 * processing an operation that takes a long time
	 */
	if (val != *prev_get) {
		*prev_get = val;
		*timeout = 0;
	}

	if ((++*timeout & 0xff) == 0) {
		DRM_UDELAY(1);
		if (*timeout > 100000)
			return -EBUSY;
	}

	if (val < chan->pushbuf_base ||
	    val > chan->pushbuf_base + (chan->dma.max << 2))
		return -EINVAL;

	return (val - chan->pushbuf_base) >> 2;
}
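
/* Queue an IB entry pointing at a segment of commands on NV50+
 * channels.  Each entry is two dwords, written exactly as below: the
 * low 32 bits of the segment's GPU virtual address, then the high
 * address bits OR'd with the length shifted left by 8 bits.  The
 * write to 0x8c bumps what this code treats as the channel's IB PUT
 * register; nv50_dma_push_wait() below polls the matching GET at 0x88.
 */
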
void
nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
	      int delta, int length)
{
	struct nouveau_bo *pb = chan->pushbuf_bo;
	struct nouveau_vma *vma;
	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
	u64 offset;

	vma = nouveau_bo_vma_find(bo, chan->vm);
	BUG_ON(!vma);
	offset = vma->offset + delta;

	BUG_ON(chan->dma.ib_free < 1);
	nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
	nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);

	chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;

	DRM_MEMORYBARRIER();
	/* Flush writes. */
	nouveau_bo_rd32(pb, 0);

	nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
	chan->dma.ib_free--;
}

static int
nv50_dma_push_wait(struct nouveau_channel *chan, int count)
{
	uint32_t cnt = 0, prev_get = 0;

	while (chan->dma.ib_free < count) {
		uint32_t get = nvchan_rd32(chan, 0x88);
		if (get != prev_get) {
			prev_get = get;
			cnt = 0;
		}

		if ((++cnt & 0xff) == 0) {
			DRM_UDELAY(1);
			if (cnt > 100000)
				return -EBUSY;
		}

		chan->dma.ib_free = get - chan->dma.ib_put;
		if (chan->dma.ib_free <= 0)
			chan->dma.ib_free += chan->dma.ib_max;
	}

	return 0;
}

static int
nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
{
	uint64_t prev_get = 0;
	int ret, cnt = 0;

	ret = nv50_dma_push_wait(chan, slots + 1);
	if (unlikely(ret))
		return ret;

	while (chan->dma.free < count) {
		int get = READ_GET(chan, &prev_get, &cnt);
		if (unlikely(get < 0)) {
			if (get == -EINVAL)
				continue;

			return get;
		}

		if (get <= chan->dma.cur) {
			chan->dma.free = chan->dma.max - chan->dma.cur;
			if (chan->dma.free >= count)
				break;

			FIRE_RING(chan);
			do {
				get = READ_GET(chan, &prev_get, &cnt);
				if (unlikely(get < 0)) {
					if (get == -EINVAL)
						continue;
					return get;
				}
			} while (get == 0);
			chan->dma.cur = 0;
			chan->dma.put = 0;
		}

		chan->dma.free = get - chan->dma.cur - 1;
	}

	return 0;
}
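
/* Wait until the channel has room for a submission.
 *
 * "slots" is the number of IB entries the caller is about to consume
 * (only meaningful on ib_max channels, where nv50_dma_wait() asks for
 * slots + 1, apparently leaving room for the entry FIRE_RING queues
 * when the segment is kicked), and "size" is the number of push
 * buffer dwords required.  RING_SPACE() in nouveau_dma.h is the usual
 * way into this function.
 */
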
int
nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
{
	uint64_t prev_get = 0;
	int cnt = 0, get;

	if (chan->dma.ib_max)
		return nv50_dma_wait(chan, slots, size);

	while (chan->dma.free < size) {
		get = READ_GET(chan, &prev_get, &cnt);
		if (unlikely(get == -EBUSY))
			return -EBUSY;

		/* loop until we have a usable GET pointer.  the value
		 * we read from the GPU may be outside the main ring if
		 * PFIFO is processing a buffer called from the main ring;
		 * discard these values until something sensible is seen.
		 *
		 * the other case where we discard GET is while the GPU
		 * is fetching from the SKIPS area, so the code below
		 * doesn't have to deal with some fun corner cases.
		 */
		if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
			continue;

		if (get <= chan->dma.cur) {
			/* engine is fetching behind us, or is completely
			 * idle (GET == PUT), so we have free space up until
			 * the end of the push buffer.
			 *
			 * we can only hit that path once per call: after
			 * looping back to the beginning of the push buffer,
			 * we'll hit the fetching-ahead-of-us path from that
			 * point on.
			 *
			 * the *one* exception to that rule is if we read
			 * GET == PUT, in which case the below conditional
			 * will always succeed and break us out of the wait
			 * loop.
			 */
			chan->dma.free = chan->dma.max - chan->dma.cur;
			if (chan->dma.free >= size)
				break;

			/* not enough space left at the end of the push
			 * buffer, instruct the GPU to jump back to the
			 * start right after processing the currently
			 * pending commands.
			 */
			OUT_RING(chan, chan->pushbuf_base | 0x20000000);

			/* wait for GET to depart from the SKIPS area.
			 * this prevents writing PUT == GET and triggering
			 * a race that makes us think the GPU is idle when
			 * it isn't.
			 */
			do {
				get = READ_GET(chan, &prev_get, &cnt);
				if (unlikely(get == -EBUSY))
					return -EBUSY;
				if (unlikely(get == -EINVAL))
					continue;
			} while (get <= NOUVEAU_DMA_SKIPS);
			WRITE_PUT(NOUVEAU_DMA_SKIPS);

			/* we're now submitting commands at the start of
			 * the push buffer.
			 */
			chan->dma.cur =
			chan->dma.put = NOUVEAU_DMA_SKIPS;
		}

		/* engine fetching ahead of us, we have space up until the
		 * current GET pointer.  the "- 1" is to ensure there's
		 * space left to emit a jump back to the beginning of the
		 * push buffer if we require it.  we can never get GET == PUT
		 * here, so this is safe.
		 */
		chan->dma.free = get - chan->dma.cur - 1;
	}

	return 0;
}
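
/* For reference, the typical submission flow built on the helpers in
 * this file (a minimal sketch; the 0x0100 NOP method offset is
 * illustrative, not something defined here):
 *
 *	ret = RING_SPACE(chan, 2);	reserves space, may wait above
 *	if (ret)
 *		return ret;
 *	BEGIN_RING(chan, NvSubM2MF, 0x0100, 1);
 *	OUT_RING (chan, 0x00000000);
 *	FIRE_RING(chan);		kicks PUT, or queues an IB entry
 */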