/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_vmm.h"

#include "nv50_display.h"

#include <nvif/push206e.h>

#include <nvhw/class/cl826f.h>

static int
nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
	struct nvif_push *push = chan->chan.push;
	int ret = PUSH_WAIT(push, 8);
	if (ret == 0) {
		PUSH_MTHD(push, NV826F, SET_CONTEXT_DMA_SEMAPHORE, chan->vram.handle);

		PUSH_MTHD(push, NV826F, SEMAPHOREA,
			  NVVAL(NV826F, SEMAPHOREA, OFFSET_UPPER, upper_32_bits(virtual)),

					SEMAPHOREB, lower_32_bits(virtual),
					SEMAPHOREC, sequence,

					SEMAPHORED,
			  NVDEF(NV826F, SEMAPHORED, OPERATION, RELEASE),

					NON_STALLED_INTERRUPT, 0);
		PUSH_KICK(push);
	}
	return ret;
}

static int
nv84_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
	struct nvif_push *push = chan->chan.push;
	int ret = PUSH_WAIT(push, 7);
	if (ret == 0) {
		PUSH_MTHD(push, NV826F, SET_CONTEXT_DMA_SEMAPHORE, chan->vram.handle);

		PUSH_MTHD(push, NV826F, SEMAPHOREA,
			  NVVAL(NV826F, SEMAPHOREA, OFFSET_UPPER, upper_32_bits(virtual)),

					SEMAPHOREB, lower_32_bits(virtual),
					SEMAPHOREC, sequence,

					SEMAPHORED,
			  NVDEF(NV826F, SEMAPHORED, OPERATION, ACQ_GEQ));
		PUSH_KICK(push);
	}
	return ret;
}

static int
nv84_fence_emit(struct nouveau_fence *fence)
{
	struct nouveau_channel *chan = fence->channel;
	struct nv84_fence_chan *fctx = chan->fence;
	u64 addr = fctx->vma->addr + chan->chid * 16;

	return fctx->base.emit32(chan, addr, fence->base.seqno);
}

static int
nv84_fence_sync(struct nouveau_fence *fence,
		struct nouveau_channel *prev, struct nouveau_channel *chan)
{
	struct nv84_fence_chan *fctx = chan->fence;
	u64 addr = fctx->vma->addr + prev->chid * 16;

	return fctx->base.sync32(chan, addr, fence->base.seqno);
}

static u32
nv84_fence_read(struct nouveau_channel *chan)
{
	struct nv84_fence_priv *priv = chan->drm->fence;
	return nouveau_bo_rd32(priv->bo, chan->chid * 16/4);
}

static void
nv84_fence_context_del(struct nouveau_channel *chan)
{
	struct nv84_fence_priv *priv = chan->drm->fence;
	struct nv84_fence_chan *fctx = chan->fence;

	nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
	mutex_lock(&priv->mutex);
	nouveau_vma_del(&fctx->vma);
	mutex_unlock(&priv->mutex);
	nouveau_fence_context_del(&fctx->base);
	chan->fence = NULL;
	nouveau_fence_context_free(&fctx->base);
}

int
nv84_fence_context_new(struct nouveau_channel *chan)
{
	struct nv84_fence_priv *priv = chan->drm->fence;
	struct nv84_fence_chan *fctx;
	int ret;

	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
	if (!fctx)
		return -ENOMEM;

	nouveau_fence_context_new(chan, &fctx->base);
	fctx->base.emit = nv84_fence_emit;
	fctx->base.sync = nv84_fence_sync;
	fctx->base.read = nv84_fence_read;
	fctx->base.emit32 = nv84_fence_emit32;
	fctx->base.sync32 = nv84_fence_sync32;
	fctx->base.sequence = nv84_fence_read(chan);

	mutex_lock(&priv->mutex);
	ret = nouveau_vma_new(priv->bo, chan->vmm, &fctx->vma);
	mutex_unlock(&priv->mutex);

	if (ret)
		nv84_fence_context_del(chan);
	return ret;
}

static bool
nv84_fence_suspend(struct nouveau_drm *drm)
{
	struct nv84_fence_priv *priv = drm->fence;
	int i;

	priv->suspend = vmalloc(array_size(sizeof(u32), drm->chan.nr));
	if (priv->suspend) {
		for (i = 0; i < drm->chan.nr; i++)
			priv->suspend[i] = nouveau_bo_rd32(priv->bo, i*4);
	}

	return priv->suspend != NULL;
}

static void
nv84_fence_resume(struct nouveau_drm *drm)
{
	struct nv84_fence_priv *priv = drm->fence;
	int i;

	if (priv->suspend) {
		for (i = 0; i < drm->chan.nr; i++)
			nouveau_bo_wr32(priv->bo, i*4, priv->suspend[i]);
		vfree(priv->suspend);
		priv->suspend = NULL;
	}
}

static void
nv84_fence_destroy(struct nouveau_drm *drm)
{
	struct nv84_fence_priv *priv = drm->fence;
	nouveau_bo_unmap(priv->bo);
	if (priv->bo)
		nouveau_bo_unpin(priv->bo);
	nouveau_bo_ref(NULL, &priv->bo);
	drm->fence = NULL;
	kfree(priv);
}

int
nv84_fence_create(struct nouveau_drm *drm)
{
	struct nv84_fence_priv *priv;
	u32 domain;
	int ret;

	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base.dtor = nv84_fence_destroy;
	priv->base.suspend = nv84_fence_suspend;
	priv->base.resume = nv84_fence_resume;
	priv->base.context_new = nv84_fence_context_new;
	priv->base.context_del = nv84_fence_context_del;

	priv->base.uevent = true;

	mutex_init(&priv->mutex);

	/* Use VRAM if there is any; otherwise fallback to system memory */
	domain = drm->client.device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
			 /*
			  * fences created in sysmem must be non-cached or we
			  * will lose CPU/GPU coherency!
			  */
			 TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
	ret = nouveau_bo_new(&drm->client, 16 * drm->chan.nr, 0,
			     domain, 0, 0, NULL, NULL, &priv->bo);
	if (ret == 0) {
		ret = nouveau_bo_pin(priv->bo, domain, false);
		if (ret == 0) {
			ret = nouveau_bo_map(priv->bo);
			if (ret)
				nouveau_bo_unpin(priv->bo);
		}
		if (ret)
			nouveau_bo_ref(NULL, &priv->bo);
	}

	if (ret)
		nv84_fence_destroy(drm);
	return ret;
}