/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"

#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)

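/* Number of scheduler entities a context exposes for each hardware IP type. */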
const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX]	=	1,
	[AMDGPU_HW_IP_COMPUTE]	=	4,
	[AMDGPU_HW_IP_DMA]	=	2,
	[AMDGPU_HW_IP_UVD]	=	1,
	[AMDGPU_HW_IP_VCE]	=	1,
	[AMDGPU_HW_IP_UVD_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_DEC]	=	1,
	[AMDGPU_HW_IP_VCN_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_JPEG]	=	1,
};

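/* Total number of scheduler entities per context, summed over all HW IP types. */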
static int amdgpu_ctx_total_num_entities(void)
{
	unsigned i, num_entities = 0;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
		num_entities += amdgpu_ctx_num_entities[i];

	return num_entities;
}

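/*
 * Check whether the caller may create a context with the requested scheduler
 * priority; anything above NORMAL requires CAP_SYS_NICE or DRM master status.
 */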
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
{
	/* NORMAL and below are accessible by everyone */
	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

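/*
 * Initialize a context: allocate the per-entity fence rings, record the
 * current reset and VRAM-lost counters, and create one scheduler entity per
 * exposed ring, attached to the run queues matching the requested priority.
 */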
static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum drm_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	unsigned i, j;
	int r;

	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
		return -EINVAL;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;

	ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
			      sizeof(struct dma_fence *), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	ctx->entities[0] = kcalloc(num_entities,
				   sizeof(struct amdgpu_ctx_entity),
				   GFP_KERNEL);
	if (!ctx->entities[0]) {
		r = -ENOMEM;
		goto error_free_fences;
	}

	for (i = 0; i < num_entities; ++i) {
		struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];

		entity->sequence = 1;
		entity->fences = &ctx->fences[amdgpu_sched_jobs * i];
	}
	for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
		ctx->entities[i] = ctx->entities[i - 1] +
			amdgpu_ctx_num_entities[i - 1];

	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	mutex_init(&ctx->lock);

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
		struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
		unsigned num_rings;
		unsigned num_rqs = 0;

		switch (i) {
		case AMDGPU_HW_IP_GFX:
			rings[0] = &adev->gfx.gfx_ring[0];
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			for (j = 0; j < adev->gfx.num_compute_rings; ++j)
				rings[j] = &adev->gfx.compute_ring[j];
			num_rings = adev->gfx.num_compute_rings;
			break;
		case AMDGPU_HW_IP_DMA:
			for (j = 0; j < adev->sdma.num_instances; ++j)
				rings[j] = &adev->sdma.instance[j].ring;
			num_rings = adev->sdma.num_instances;
			break;
		case AMDGPU_HW_IP_UVD:
			rings[0] = &adev->uvd.inst[0].ring;
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_VCE:
			rings[0] = &adev->vce.ring[0];
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			rings[0] = &adev->uvd.inst[0].ring_enc[0];
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
			rings[0] = &adev->vcn.ring_dec;
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_VCN_ENC:
			rings[0] = &adev->vcn.ring_enc[0];
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_VCN_JPEG:
			rings[0] = &adev->vcn.ring_jpeg;
			num_rings = 1;
			break;
		}

		for (j = 0; j < num_rings; ++j) {
			if (!rings[j]->adev)
				continue;

			rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority];
		}

		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			r = drm_sched_entity_init(&ctx->entities[i][j].entity,
						  rqs, num_rqs, &ctx->guilty);
			if (r)
				goto error_cleanup_entities;
		}
	}

	return 0;

error_cleanup_entities:
	for (i = 0; i < num_entities; ++i)
		drm_sched_entity_destroy(&ctx->entities[0][i].entity);
	kfree(ctx->entities[0]);

error_free_fences:
	kfree(ctx->fences);
	ctx->fences = NULL;
	return r;
}

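/*
 * Final kref release callback: drop the cached fences and free the memory
 * backing the context.
 */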
static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < num_entities; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			dma_fence_put(ctx->entities[0][i].fences[j]);
	kfree(ctx->fences);
	kfree(ctx->entities[0]);

	mutex_destroy(&ctx->lock);

	kfree(ctx);
}

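/**
 * amdgpu_ctx_get_entity - look up the scheduler entity for a ring
 * @ctx: context to look the entity up in
 * @hw_ip: hardware IP type of the ring
 * @instance: hardware IP instance, currently always 0
 * @ring: ring index within the IP type
 * @entity: returned scheduler entity
 *
 * Returns 0 on success or -EINVAL if the IP type, instance or ring index is
 * out of range.
 */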
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity)
{
	if (hw_ip >= AMDGPU_HW_IP_NUM) {
		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
		return -EINVAL;
	}

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
		return -EINVAL;
	}

	*entity = &ctx->entities[hw_ip][ring].entity;
	return 0;
}

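/*
 * Allocate and initialize a new context and publish its handle through the
 * file private's context IDR.
 */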
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum drm_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

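/*
 * kref release callback used at runtime: tear down the scheduler entities
 * before releasing the context memory.
 */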
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	unsigned num_entities;
	u32 i;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	num_entities = amdgpu_ctx_total_num_entities();
	for (i = 0; i < num_entities; i++)
		drm_sched_entity_destroy(&ctx->entities[0][i].entity);

	amdgpu_ctx_fini(ref);
}

static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

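/*
 * Implements AMDGPU_CTX_OP_QUERY_STATE: report whether a GPU reset has
 * occurred since the last query of this context.
 */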
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

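/*
 * Implements AMDGPU_CTX_OP_QUERY_STATE2: report reset, VRAM loss, guilt and
 * RAS error status as a set of flags.
 */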
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
			     struct amdgpu_fpriv *fpriv, uint32_t id,
			     union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	uint32_t ras_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	/* query the uncorrectable error count */
	ras_counter = amdgpu_ras_query_error_count(adev, false);
	/* the RAS counters are monotonically increasing */
	if (ras_counter != ctx->ras_counter_ue) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
		ctx->ras_counter_ue = ras_counter;
	}

	/* query the correctable error count */
	ras_counter = amdgpu_ras_query_error_count(adev, true);
	if (ras_counter != ctx->ras_counter_ce) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
		ctx->ras_counter_ce = ras_counter;
	}

	mutex_unlock(&mgr->lock);
	return 0;
}

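/* Handle the AMDGPU context ioctl: allocate, free or query a context. */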
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum drm_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field.
	 */
	if (priority == DRM_SCHED_PRIORITY_INVALID)
		priority = DRM_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

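/*
 * Look up a context by handle and take a reference on it. The caller must
 * balance this with amdgpu_ctx_put().
 */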
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

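/* Drop a reference taken with amdgpu_ctx_get(). */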
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

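/*
 * Store the fence of a new submission in the entity's fence ring and return
 * the sequence number that identifies it.
 */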
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			  struct drm_sched_entity *entity,
			  struct dma_fence *fence, uint64_t *handle)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	uint64_t seq = centity->sequence;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handle)
		*handle = seq;
}

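/*
 * Look up the fence for a sequence number. A seq of ~0 means the most
 * recently submitted fence. Returns NULL if the slot has already been reused,
 * or ERR_PTR(-EINVAL) for sequence numbers that were never submitted.
 */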
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = centity->sequence - 1;

	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

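/*
 * Apply a priority override to all entities of a context;
 * DRM_SCHED_PRIORITY_UNSET restores the context's initial priority.
 */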
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	enum drm_sched_priority ctx_prio;
	unsigned i;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	for (i = 0; i < num_entities; i++) {
		struct drm_sched_entity *entity = &ctx->entities[0][i].entity;

		drm_sched_entity_set_priority(entity, ctx_prio);
	}
}

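/*
 * Wait for the fence that is about to be overwritten in the entity's fence
 * ring, limiting the number of in-flight submissions per entity to
 * amdgpu_sched_jobs.
 */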
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1);
	struct dma_fence *other = centity->fences[idx];

	if (other) {
		signed long r;

		r = dma_fence_wait(other, true);
		if (r < 0) {
			if (r != -ERESTARTSYS)
				DRM_ERROR("Error (%ld) waiting for fence!\n", r);

			return r;
		}
	}

	return 0;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

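/*
 * Flush the scheduler entities of all contexts of this manager, giving each
 * entity at most the remaining timeout; returns how much of the timeout is
 * left.
 */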
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		for (i = 0; i < num_entities; i++) {
			struct drm_sched_entity *entity;

			entity = &ctx->entities[0][i].entity;
			timeout = drm_sched_entity_flush(entity, timeout);
		}
	}
	mutex_unlock(&mgr->lock);
	return timeout;
}

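/*
 * Finish the scheduler entities of all contexts that are no longer
 * referenced; contexts with outstanding references are only reported.
 */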
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_read(&ctx->refcount) != 1) {
			DRM_ERROR("ctx %p is still alive\n", ctx);
			continue;
		}

		for (i = 0; i < num_entities; i++)
			drm_sched_entity_fini(&ctx->entities[0][i].entity);
	}
}

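/*
 * Release all remaining contexts and tear down the context manager's IDR and
 * lock.
 */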
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}