/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"

#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)

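/*
 * Number of scheduler entities (user-visible rings a context can submit to)
 * that each context exposes per hardware IP block.  GFX and the multimedia
 * blocks expose one entity each, while compute and DMA expose several.
 */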
const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX]	=	1,
	[AMDGPU_HW_IP_COMPUTE]	=	4,
	[AMDGPU_HW_IP_DMA]	=	2,
	[AMDGPU_HW_IP_UVD]	=	1,
	[AMDGPU_HW_IP_VCE]	=	1,
	[AMDGPU_HW_IP_UVD_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_DEC]	=	1,
	[AMDGPU_HW_IP_VCN_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_JPEG]	=	1,
};

static int amdgpu_ctx_total_num_entities(void)
{
	unsigned i, num_entities = 0;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
		num_entities += amdgpu_ctx_num_entities[i];

	return num_entities;
}

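/*
 * Creating a context with better than NORMAL priority is a privileged
 * operation: it is allowed for processes with CAP_SYS_NICE and for the
 * current DRM master, everyone else gets -EACCES.
 */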
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
{
	/* NORMAL and below are accessible by everyone */
	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

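/*
 * Initialize a context: allocate the per-entity fence ring buffers, create
 * one scheduler entity for every ring the context exposes on each hardware
 * IP block, and snapshot the current GPU reset and VRAM lost counters so
 * later queries can detect resets that happened after creation.
 */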
static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum drm_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	unsigned i, j;
	int r;

	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
		return -EINVAL;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;

	ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
			      sizeof(struct dma_fence *), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	ctx->entities[0] = kcalloc(num_entities,
				   sizeof(struct amdgpu_ctx_entity),
				   GFP_KERNEL);
	if (!ctx->entities[0]) {
		r = -ENOMEM;
		goto error_free_fences;
	}

	for (i = 0; i < num_entities; ++i) {
		struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];

		entity->sequence = 1;
		entity->fences = &ctx->fences[amdgpu_sched_jobs * i];
	}
	for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
		ctx->entities[i] = ctx->entities[i - 1] +
			amdgpu_ctx_num_entities[i - 1];

	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	mutex_init(&ctx->lock);

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
		struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
		/* the switch below has no default case, so stay defined */
		unsigned num_rings = 0;
		unsigned num_rqs = 0;
		switch (i) {
		case AMDGPU_HW_IP_GFX:
			rings[0] = &adev->gfx.gfx_ring[0];
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			for (j = 0; j < adev->gfx.num_compute_rings; ++j)
				rings[j] = &adev->gfx.compute_ring[j];
			num_rings = adev->gfx.num_compute_rings;
			break;
		case AMDGPU_HW_IP_DMA:
			for (j = 0; j < adev->sdma.num_instances; ++j)
				rings[j] = &adev->sdma.instance[j].ring;
			num_rings = adev->sdma.num_instances;
			break;
		case AMDGPU_HW_IP_UVD:
			rings[0] = &adev->uvd.inst[0].ring;
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_VCE:
			rings[0] = &adev->vce.ring[0];
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			rings[0] = &adev->uvd.inst[0].ring_enc[0];
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
			rings[0] = &adev->vcn.ring_dec;
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_VCN_ENC:
			rings[0] = &adev->vcn.ring_enc[0];
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_VCN_JPEG:
			rings[0] = &adev->vcn.ring_jpeg;
			num_rings = 1;
			break;
		}

		for (j = 0; j < num_rings; ++j) {
			if (!rings[j]->adev)
				continue;

			rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority];
		}

		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			r = drm_sched_entity_init(&ctx->entities[i][j].entity,
						  rqs, num_rqs, &ctx->guilty);
			if (r)
				goto error_cleanup_entities;
		}
	}

	return 0;

error_cleanup_entities:
	for (i = 0; i < num_entities; ++i)
		drm_sched_entity_destroy(&ctx->entities[0][i].entity);
	kfree(ctx->entities[0]);

error_free_fences:
	kfree(ctx->fences);
	ctx->fences = NULL;
	return r;
}

static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < num_entities; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			dma_fence_put(ctx->entities[0][i].fences[j]);
	kfree(ctx->fences);
	kfree(ctx->entities[0]);

	mutex_destroy(&ctx->lock);

	kfree(ctx);
}

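/*
 * Translate the (hw_ip, instance, ring) triple coming from user space into
 * the scheduler entity used for job submission.  All IPs currently expose
 * only a single instance, so anything but instance 0 is rejected.
 */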
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity)
{
	if (hw_ip >= AMDGPU_HW_IP_NUM) {
		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
		return -EINVAL;
	}

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
		return -EINVAL;
	}

	*entity = &ctx->entities[hw_ip][ring].entity;
	return 0;
}

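/*
 * Allocate and initialize a new context on behalf of a file descriptor and
 * publish it in the per-file IDR; the IDR slot number becomes the context
 * id handed back to user space.
 */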
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum drm_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	unsigned num_entities;
	u32 i;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	num_entities = amdgpu_ctx_total_num_entities();
	for (i = 0; i < num_entities; i++)
		drm_sched_entity_destroy(&ctx->entities[0][i].entity);

	amdgpu_ctx_fini(ref);
}

static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

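/*
 * AMDGPU_CTX_OP_QUERY_STATE: report whether a GPU reset has occurred since
 * the last query of this context.  The per-context query counter is
 * updated, so a reset is only reported once.
 */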
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

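/*
 * AMDGPU_CTX_OP_QUERY_STATE2: report reset, VRAM-lost and guilty status as
 * a set of flags.  Unlike the legacy query this does not consume any state;
 * it simply compares the context's creation-time snapshots against the
 * current counters.
 */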
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
	struct amdgpu_fpriv *fpriv, uint32_t id,
	union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	mutex_unlock(&mgr->lock);
	return 0;
}

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum drm_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (priority == DRM_SCHED_PRIORITY_INVALID)
		priority = DRM_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

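/*
 * Store a scheduled fence in the entity's ring buffer of the last
 * amdgpu_sched_jobs fences and return its sequence number through @handle.
 * amdgpu_sched_jobs is a power of two, so the slot is simply the sequence
 * number masked by it; the fence being overwritten must already have
 * signaled, which the BUG_ON below asserts.
 */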
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			  struct drm_sched_entity *entity,
			  struct dma_fence *fence, uint64_t *handle)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	uint64_t seq = centity->sequence;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handle)
		*handle = seq;
}

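/*
 * Look up the fence that was submitted with the given sequence number.  A
 * seq of ~0ull means "the most recent submission".  Returns an error for
 * sequence numbers from the future, NULL for fences so old that they have
 * already been dropped from the ring buffer, and a new fence reference
 * otherwise.
 */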
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = centity->sequence - 1;

	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

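/*
 * Apply a priority override to every scheduler entity of the context.  An
 * override of DRM_SCHED_PRIORITY_UNSET falls back to the priority the
 * context was created with.
 */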
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	enum drm_sched_priority ctx_prio;
	unsigned i;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	for (i = 0; i < num_entities; i++) {
		struct drm_sched_entity *entity = &ctx->entities[0][i].entity;

		drm_sched_entity_set_priority(entity, ctx_prio);
	}
}

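/*
 * Wait for the fence occupying the slot that the entity's next submission
 * will reuse.  This bounds the number of unsignaled submissions per entity
 * to amdgpu_sched_jobs and is what makes the BUG_ON in
 * amdgpu_ctx_add_fence() hold.
 */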
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1);
	struct dma_fence *other = centity->fences[idx];

	if (other) {
		signed long r;

		r = dma_fence_wait(other, true);
		if (r < 0) {
			if (r != -ERESTARTSYS)
				DRM_ERROR("Error (%ld) waiting for fence!\n", r);

			return r;
		}
	}

	return 0;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

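/*
 * Flush the entities of every context belonging to a file: give each entity
 * a bounded amount of time to drain its queued jobs, carrying the remaining
 * wait budget from one entity to the next.
 */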
void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;
	long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		if (!ctx->adev) {
			mutex_unlock(&mgr->lock);
			return;
		}

		for (i = 0; i < num_entities; i++) {
			struct drm_sched_entity *entity;

			entity = &ctx->entities[0][i].entity;
			max_wait = drm_sched_entity_flush(entity, max_wait);
		}
	}
	mutex_unlock(&mgr->lock);
}

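/*
 * Tear down the scheduler entities of all remaining contexts.  Contexts
 * that still hold extra references at this point are reported, since they
 * should all have been released by now.
 */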
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (!ctx->adev)
			return;

		if (kref_read(&ctx->refcount) != 1) {
			DRM_ERROR("ctx %p is still alive\n", ctx);
			continue;
		}

		for (i = 0; i < num_entities; i++)
			drm_sched_entity_fini(&ctx->entities[0][i].entity);
	}
}

void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}