/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "soc15ip.h"
#include "raven1/VCN/vcn_1_0_offset.h"

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

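/*
 * amdgpu_vcn_sw_init - load VCN firmware and allocate the VCPU buffer
 *
 * Requests and validates the Raven VCN firmware image, allocates the
 * VRAM buffer object backing the VCPU (firmware image plus stack, heap
 * and session state), and initializes the scheduler entities used by
 * the decode and encode rings. Returns 0 on success or a negative
 * error code.
 */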
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned version_major, version_minor, family_id;
	int r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		fw_name = FIRMWARE_RAVEN;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
	DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
		 version_major, version_minor, family_id);

	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
		  + AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
		  + AMDGPU_VCN_SESSION_SIZE * 40;
	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
				    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
		return r;
	}

	ring = &adev->vcn.ring_dec;
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
	r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
				  rq, amdgpu_sched_jobs, NULL);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCN dec run queue.\n");
		return r;
	}

	ring = &adev->vcn.ring_enc[0];
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
	r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
				  rq, amdgpu_sched_jobs, NULL);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCN enc run queue.\n");
		return r;
	}

	return 0;
}

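/*
 * amdgpu_vcn_sw_fini - tear down VCN software state
 *
 * Frees the suspend buffer, the scheduler entities, the VCPU buffer
 * object and the decode/encode rings, and releases the firmware image.
 */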
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i;

	kfree(adev->vcn.saved_bo);

	amd_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);

	amd_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);

	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
			      &adev->vcn.gpu_addr,
			      (void **)&adev->vcn.cpu_addr);

	amdgpu_ring_fini(&adev->vcn.ring_dec);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		amdgpu_ring_fini(&adev->vcn.ring_enc[i]);

	release_firmware(adev->vcn.fw);

	return 0;
}

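/*
 * amdgpu_vcn_suspend - save VCPU buffer contents before suspend
 *
 * Cancels any pending idle work and copies the VCPU buffer out of VRAM
 * into a kernel allocation so it can be restored on resume.
 */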
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return 0;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	adev->vcn.saved_bo = kmalloc(size, GFP_KERNEL);
	if (!adev->vcn.saved_bo)
		return -ENOMEM;

	memcpy_fromio(adev->vcn.saved_bo, ptr, size);

	return 0;
}

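/*
 * amdgpu_vcn_resume - restore the VCPU buffer contents
 *
 * Copies the saved VCPU image back into VRAM if one exists; otherwise
 * reloads the raw firmware image and clears the rest of the buffer.
 */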
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	if (adev->vcn.saved_bo != NULL) {
		memcpy_toio(ptr, adev->vcn.saved_bo, size);
		kfree(adev->vcn.saved_bo);
		adev->vcn.saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned offset;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
		memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
			    le32_to_cpu(hdr->ucode_size_bytes));
		size -= le32_to_cpu(hdr->ucode_size_bytes);
		ptr += le32_to_cpu(hdr->ucode_size_bytes);
		memset_io(ptr, 0, size);
	}

	return 0;
}

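/*
 * amdgpu_vcn_idle_work_handler - delayed work to power down an idle VCN
 *
 * Runs VCN_IDLE_TIMEOUT after the last ring use; reschedules itself
 * while decode fences are still outstanding.
 */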
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);

	if (fences == 0) {
		if (adev->pm.dpm_enabled) {
			/* to be used once powergating/clockgating (pg/cg) is enabled:
			 * amdgpu_dpm_enable_uvd(adev, false);
			 */
		}
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

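/*
 * amdgpu_vcn_ring_begin_use - ring usage notifier
 *
 * Cancels the pending idle work; if none was pending the block was
 * considered idle, and clocks would be raised here once pg/cg is wired up.
 */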
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (set_clocks && adev->pm.dpm_enabled) {
		/* to be used once powergating/clockgating (pg/cg) is enabled:
		 * amdgpu_dpm_enable_uvd(adev, true);
		 */
	}
}

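/*
 * amdgpu_vcn_ring_end_use - schedule the idle check after ring use
 */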
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

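/*
 * amdgpu_vcn_dec_ring_test_ring - register write/read test on the decode ring
 *
 * Writes a known value to UVD_CONTEXT_ID through the ring and polls the
 * register until it reads back, proving the ring and VCPU are alive.
 */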
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: vcn dec failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

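/*
 * amdgpu_vcn_dec_send_msg - submit a decoder message buffer to the ring
 *
 * Reserves and validates the message BO, builds a small IB that points
 * the VCPU at the message address and submits it, either directly or
 * through the scheduler entity. Optionally returns the submission fence.
 */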
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
				   bool direct, struct dma_fence **fence)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head head;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	struct amdgpu_device *adev = ring->adev;
	uint64_t addr;
	int i, r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
	if (r)
		return r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		goto err;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err_free;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &adev->vcn.entity_dec,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err_free;
	}

	ttm_eu_fence_buffer_objects(&ticket, &head, f);

	if (fence)
		*fence = dma_fence_get(f);
	amdgpu_bo_unref(&bo);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	ttm_eu_backoff_reservation(&ticket, &head);
	return r;
}

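/*
 * amdgpu_vcn_dec_get_create_msg - build and send a decoder create message
 *
 * Allocates a CPU-accessible VRAM buffer, fills in a session-create
 * message for the given handle and submits it directly to the ring.
 */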
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
			     NULL, NULL, 0, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_vcn_dec_send_msg(ring, bo, true, fence);
}

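/*
 * amdgpu_vcn_dec_get_destroy_msg - build and send a decoder destroy message
 *
 * Counterpart to the create message; tells the VCPU to tear down the
 * session identified by @handle.
 */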
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
			     NULL, NULL, 0, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence);
}

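/*
 * amdgpu_vcn_dec_ring_test_ib - decode ring IB test
 *
 * Sends a create message followed by a destroy message and waits on the
 * destroy fence to verify that IB submission round-trips through the VCPU.
 */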
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}

	dma_fence_put(fence);

error:
	return r;
}

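/*
 * amdgpu_vcn_enc_ring_test_ring - encode ring liveness test
 *
 * Submits a single END command and polls the read pointer until the
 * engine consumes it.
 */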
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

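/*
 * amdgpu_vcn_enc_get_create_msg - submit an encoder session-create IB
 *
 * Builds a minimal session info / task info / initialize command stream
 * for @handle and schedules it directly on the encode ring.
 */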
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

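/*
 * amdgpu_vcn_enc_get_destroy_msg - submit an encoder session-close IB
 *
 * Mirrors the create message but issues the close-session opcode for
 * @handle.
 */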
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

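/*
 * amdgpu_vcn_enc_ring_test_ib - encode ring IB test
 *
 * Creates and destroys an encoder session and waits on the final fence,
 * mirroring the decode ring IB test.
 */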
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}