/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"

#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)

const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX]	=	1,
	[AMDGPU_HW_IP_COMPUTE]	=	4,
	[AMDGPU_HW_IP_DMA]	=	2,
	[AMDGPU_HW_IP_UVD]	=	1,
	[AMDGPU_HW_IP_VCE]	=	1,
	[AMDGPU_HW_IP_UVD_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_DEC]	=	1,
	[AMDGPU_HW_IP_VCN_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_JPEG]	=	1,
};

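/* Total number of scheduler entities a context carries, summed over all HW IP types. */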
static int amdgpu_ctx_total_num_entities(void)
{
	unsigned i, num_entities = 0;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
		num_entities += amdgpu_ctx_num_entities[i];

	return num_entities;
}

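/* Contexts above NORMAL priority require CAP_SYS_NICE or DRM master status. */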
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
{
	/* NORMAL and below are accessible by everyone */
	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

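/*
 * Set up a freshly allocated context: allocate the per-entity fence slots and
 * initialize one scheduler entity for every ring exposed by each HW IP type.
 */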
static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum drm_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	unsigned i, j;
	int r;

	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
		return -EINVAL;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;

	ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
			      sizeof(struct dma_fence*), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	ctx->entities[0] = kcalloc(num_entities,
				   sizeof(struct amdgpu_ctx_entity),
				   GFP_KERNEL);
	if (!ctx->entities[0]) {
		r = -ENOMEM;
		goto error_free_fences;
	}

	for (i = 0; i < num_entities; ++i) {
		struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];

		entity->sequence = 1;
		entity->fences = &ctx->fences[amdgpu_sched_jobs * i];
	}
	for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
		ctx->entities[i] = ctx->entities[i - 1] +
			amdgpu_ctx_num_entities[i - 1];

	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	mutex_init(&ctx->lock);

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
		struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
		unsigned num_rings;
		unsigned num_rqs = 0;

		switch (i) {
		case AMDGPU_HW_IP_GFX:
			rings[0] = &adev->gfx.gfx_ring[0];
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			for (j = 0; j < adev->gfx.num_compute_rings; ++j)
				rings[j] = &adev->gfx.compute_ring[j];
			num_rings = adev->gfx.num_compute_rings;
			break;
		case AMDGPU_HW_IP_DMA:
			for (j = 0; j < adev->sdma.num_instances; ++j)
				rings[j] = &adev->sdma.instance[j].ring;
			num_rings = adev->sdma.num_instances;
			break;
		case AMDGPU_HW_IP_UVD:
			rings[0] = &adev->uvd.inst[0].ring;
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_VCE:
			rings[0] = &adev->vce.ring[0];
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			rings[0] = &adev->uvd.inst[0].ring_enc[0];
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
			rings[0] = &adev->vcn.ring_dec;
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_VCN_ENC:
			rings[0] = &adev->vcn.ring_enc[0];
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_VCN_JPEG:
			rings[0] = &adev->vcn.ring_jpeg;
			num_rings = 1;
			break;
		}

		for (j = 0; j < num_rings; ++j) {
			if (!rings[j]->adev)
				continue;

			rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority];
		}

		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			r = drm_sched_entity_init(&ctx->entities[i][j].entity,
						  rqs, num_rqs, &ctx->guilty);
			if (r)
				goto error_cleanup_entities;
		}
	}

	return 0;

error_cleanup_entities:
	for (i = 0; i < num_entities; ++i)
		drm_sched_entity_destroy(&ctx->entities[0][i].entity);
	kfree(ctx->entities[0]);

error_free_fences:
	kfree(ctx->fences);
	ctx->fences = NULL;
	return r;
}

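/*
 * Final kref release helper: drop all fences still stored in the context and
 * free the fence and entity arrays allocated in amdgpu_ctx_init().
 */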
static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < num_entities; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			dma_fence_put(ctx->entities[0][i].fences[j]);
	kfree(ctx->fences);
	kfree(ctx->entities[0]);

	mutex_destroy(&ctx->lock);

	kfree(ctx);
}

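/* Translate a (hw_ip, instance, ring) triple into the matching scheduler entity. */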
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity)
{
	if (hw_ip >= AMDGPU_HW_IP_NUM) {
		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
		return -EINVAL;
	}

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
		return -EINVAL;
	}

	*entity = &ctx->entities[hw_ip][ring].entity;
	return 0;
}

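/* Allocate and initialize a new context, publish it in the file's handle IDR and return its handle via @id. */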
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum drm_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

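/* kref release callback for the normal free path: destroy all scheduler entities, then tear the context down. */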
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	unsigned num_entities;
	u32 i;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	num_entities = amdgpu_ctx_total_num_entities();

	for (i = 0; i < num_entities; i++)
		drm_sched_entity_destroy(&ctx->entities[0][i].entity);

	amdgpu_ctx_fini(ref);
}

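/* Remove a context handle from the IDR and drop the reference the IDR held on it. */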
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

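/* Legacy AMDGPU_CTX_OP_QUERY_STATE handler: report whether a GPU reset happened since the last query. */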
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

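/* AMDGPU_CTX_OP_QUERY_STATE2 handler: report reset, VRAM loss, guilt and RAS error state as flags. */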
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
			     struct amdgpu_fpriv *fpriv, uint32_t id,
			     union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	uint32_t ras_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	/* query the uncorrectable error (UE) count */
	ras_counter = amdgpu_ras_query_error_count(adev, false);
	/* the RAS counters are monotonically increasing */
	if (ras_counter != ctx->ras_counter_ue) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
		ctx->ras_counter_ue = ras_counter;
	}

	/* query the correctable error (CE) count */
	ras_counter = amdgpu_ras_query_error_count(adev, true);
	if (ras_counter != ctx->ras_counter_ce) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
		ctx->ras_counter_ce = ras_counter;
	}

	mutex_unlock(&mgr->lock);
	return 0;
}

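/* ioctl entry point for context management: dispatches alloc, free and the two query operations. */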
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum drm_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (priority == DRM_SCHED_PRIORITY_INVALID)
		priority = DRM_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

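/*
 * Publish a fence in the next slot of the entity's fence ring buffer and hand
 * the sequence number back to the caller for later lookup.
 */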
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			  struct drm_sched_entity *entity,
			  struct dma_fence *fence, uint64_t *handle)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	uint64_t seq = centity->sequence;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handle)
		*handle = seq;
}

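/*
 * Look up a fence by sequence number. Returns NULL when the sequence number is
 * so old that its slot has been reused (the fence has signaled), and an
 * ERR_PTR(-EINVAL) for a sequence number that has not been emitted yet.
 */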
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = centity->sequence - 1;

	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

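/*
 * Apply a priority override to every entity of the context;
 * DRM_SCHED_PRIORITY_UNSET restores the priority the context was created with.
 */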
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	enum drm_sched_priority ctx_prio;
	unsigned i;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	for (i = 0; i < num_entities; i++) {
		struct drm_sched_entity *entity = &ctx->entities[0][i].entity;

		drm_sched_entity_set_priority(entity, ctx_prio);
	}
}

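/*
 * Wait for the fence that still occupies the ring-buffer slot the next
 * submission on this entity will reuse, so the slot is guaranteed to be free.
 */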
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1);
	struct dma_fence *other = centity->fences[idx];

	if (other) {
		signed long r;
		r = dma_fence_wait(other, true);
		if (r < 0) {
			if (r != -ERESTARTSYS)
				DRM_ERROR("Error (%ld) waiting for fence!\n", r);

			return r;
		}
	}

	return 0;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

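/* Flush every scheduler entity of every context tracked by the manager, propagating the remaining timeout. */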
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		for (i = 0; i < num_entities; i++) {
			struct drm_sched_entity *entity;

			entity = &ctx->entities[0][i].entity;
			timeout = drm_sched_entity_flush(entity, timeout);
		}
	}
	mutex_unlock(&mgr->lock);
	return timeout;
}

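/* Finalize the scheduler entities of every context only the manager still references; warn about contexts that are still in use. */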
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_read(&ctx->refcount) != 1) {
			DRM_ERROR("ctx %p is still alive\n", ctx);
			continue;
		}

		for (i = 0; i < num_entities; i++)
			drm_sched_entity_fini(&ctx->entities[0][i].entity);
	}
}

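/* Tear down the context manager: finalize the entities, drop the IDR references, then destroy the IDR and lock. */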
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}