/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#include "amdgpu.h"

static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
{
        unsigned i, j;
        int r;

        memset(ctx, 0, sizeof(*ctx));
        ctx->adev = adev;
        kref_init(&ctx->refcount);
        spin_lock_init(&ctx->ring_lock);

        /* one fence slot per schedulable job on every possible ring */
        ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
                              sizeof(struct dma_fence *), GFP_KERNEL);
        if (!ctx->fences)
                return -ENOMEM;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                ctx->rings[i].sequence = 1;
                ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
        }

        ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);

        /* create context entity for each ring */
        for (i = 0; i < adev->num_rings; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                struct amd_sched_rq *rq;

                rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];

                /* the KIQ ring is for kernel-internal use only, user space
                 * contexts never submit to it */
                if (ring == &adev->gfx.kiq.ring)
                        continue;

                r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
                                          rq, amdgpu_sched_jobs);
                if (r)
                        goto failed;
        }

        return 0;

failed:
        for (j = 0; j < i; j++) {
                /* no entity was created for the KIQ ring, see above */
                if (adev->rings[j] == &adev->gfx.kiq.ring)
                        continue;
                amd_sched_entity_fini(&adev->rings[j]->sched,
                                      &ctx->rings[j].entity);
        }
        kfree(ctx->fences);
        ctx->fences = NULL;
        return r;
}

static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
{
        struct amdgpu_device *adev = ctx->adev;
        unsigned i, j;

        if (!adev)
                return;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                for (j = 0; j < amdgpu_sched_jobs; ++j)
                        dma_fence_put(ctx->rings[i].fences[j]);
        kfree(ctx->fences);
        ctx->fences = NULL;

        for (i = 0; i < adev->num_rings; i++) {
                /* the KIQ entity was never initialized in amdgpu_ctx_init(),
                 * so skip it here as well */
                if (adev->rings[i] == &adev->gfx.kiq.ring)
                        continue;
                amd_sched_entity_fini(&adev->rings[i]->sched,
                                      &ctx->rings[i].entity);
        }
}

static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
                            struct amdgpu_fpriv *fpriv,
                            uint32_t *id)
{
        struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
        struct amdgpu_ctx *ctx;
        int r;

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        mutex_lock(&mgr->lock);
        /* an upper bound of 0 means "no limit" for idr_alloc() */
        r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
        if (r < 0) {
                mutex_unlock(&mgr->lock);
                kfree(ctx);
                return r;
        }
        *id = (uint32_t)r;
        r = amdgpu_ctx_init(adev, ctx);
        if (r) {
                idr_remove(&mgr->ctx_handles, *id);
                *id = 0;
                kfree(ctx);
        }
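        /* the IDR slot now holds either a fully initialized context or has
         * been removed again above, so lookups serialized by mgr->lock can
         * never observe a half-constructed context */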
        mutex_unlock(&mgr->lock);
        return r;
}

static void amdgpu_ctx_do_release(struct kref *ref)
{
        struct amdgpu_ctx *ctx;

        ctx = container_of(ref, struct amdgpu_ctx, refcount);

        amdgpu_ctx_fini(ctx);

        kfree(ctx);
}

static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
        struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
        struct amdgpu_ctx *ctx;

        mutex_lock(&mgr->lock);
        ctx = idr_remove(&mgr->ctx_handles, id);
        if (ctx)
                kref_put(&ctx->refcount, amdgpu_ctx_do_release);
        mutex_unlock(&mgr->lock);
        return ctx ? 0 : -EINVAL;
}

static int amdgpu_ctx_query(struct amdgpu_device *adev,
                            struct amdgpu_fpriv *fpriv, uint32_t id,
                            union drm_amdgpu_ctx_out *out)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;
        unsigned reset_counter;

        if (!fpriv)
                return -EINVAL;

        mgr = &fpriv->ctx_mgr;
        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (!ctx) {
                mutex_unlock(&mgr->lock);
                return -EINVAL;
        }

        /* TODO: these two are always zero */
        out->state.flags = 0x0;
        out->state.hangs = 0x0;

        /* determine if a GPU reset has occurred since the last call */
        reset_counter = atomic_read(&adev->gpu_reset_counter);
        /* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
        if (ctx->reset_counter == reset_counter)
                out->state.reset_status = AMDGPU_CTX_NO_RESET;
        else
                out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
        ctx->reset_counter = reset_counter;

        mutex_unlock(&mgr->lock);
        return 0;
}

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *filp)
{
        int r;
        uint32_t id;

        union drm_amdgpu_ctx *args = data;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;

        r = 0;
        id = args->in.ctx_id;

        switch (args->in.op) {
        case AMDGPU_CTX_OP_ALLOC_CTX:
                r = amdgpu_ctx_alloc(adev, fpriv, &id);
                args->out.alloc.ctx_id = id;
                break;
        case AMDGPU_CTX_OP_FREE_CTX:
                r = amdgpu_ctx_free(fpriv, id);
                break;
        case AMDGPU_CTX_OP_QUERY_STATE:
                r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
                break;
        default:
                return -EINVAL;
        }

        return r;
}

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;

        if (!fpriv)
                return NULL;

        mgr = &fpriv->ctx_mgr;

        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (ctx)
                kref_get(&ctx->refcount);
        mutex_unlock(&mgr->lock);
        return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
        if (ctx == NULL)
                return -EINVAL;

        kref_put(&ctx->refcount, amdgpu_ctx_do_release);
        return 0;
}

uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
                              struct dma_fence *fence)
{
        struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
        uint64_t seq = cring->sequence;
        unsigned idx = seq & (amdgpu_sched_jobs - 1);
        struct dma_fence *other = cring->fences[idx];

        if (other) {
                signed long r;

                /* wait for the oldest fence before overwriting its slot;
                 * this also throttles a context to at most amdgpu_sched_jobs
                 * in-flight submissions per ring */
                r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
                if (r < 0)
                        DRM_ERROR("Error (%ld) waiting for fence!\n", r);
        }

        dma_fence_get(fence);

        spin_lock(&ctx->ring_lock);
        cring->fences[idx] = fence;
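        /* advance the sequence in the same critical section as the fence
         * store, so amdgpu_ctx_get_fence() always sees a consistent pair */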
        cring->sequence++;
        spin_unlock(&ctx->ring_lock);

        dma_fence_put(other);

        return seq;
}

struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
                                       struct amdgpu_ring *ring, uint64_t seq)
{
        struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
        struct dma_fence *fence;

        spin_lock(&ctx->ring_lock);

        /* ~0ull means "the most recently added fence" */
        if (seq == ~0ull)
                seq = cring->sequence - 1;

        if (seq >= cring->sequence) {
                spin_unlock(&ctx->ring_lock);
                return ERR_PTR(-EINVAL);
        }

        /* fences older than the ring buffer window have already been
         * waited on and overwritten, see amdgpu_ctx_add_fence() */
        if (seq + amdgpu_sched_jobs < cring->sequence) {
                spin_unlock(&ctx->ring_lock);
                return NULL;
        }

        fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
        spin_unlock(&ctx->ring_lock);

        return fence;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
        mutex_init(&mgr->lock);
        idr_init(&mgr->ctx_handles);
}

void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id;

        idp = &mgr->ctx_handles;

        idr_for_each_entry(idp, ctx, id) {
                if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
                        DRM_ERROR("ctx %p is still alive\n", ctx);
        }

        idr_destroy(&mgr->ctx_handles);
        mutex_destroy(&mgr->lock);
}