/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vega10/soc15ip.h"
#include "raven1/VCN/vcn_1_0_offset.h"

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

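/**
 * amdgpu_vcn_sw_init - initialize VCN software state
 * @adev: amdgpu_device pointer
 *
 * Request and validate the VCN firmware, allocate the VCPU buffer object
 * in VRAM and set up the scheduler entities for the decode and encode rings.
 * Returns 0 on success or a negative error code on failure.
 */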
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned version_major, version_minor, family_id;
	int r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		fw_name = FIRMWARE_RAVEN;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
	DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
		version_major, version_minor, family_id);

	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
		  +  AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
		  +  AMDGPU_VCN_SESSION_SIZE * 40;
	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
				    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
		return r;
	}

	ring = &adev->vcn.ring_dec;
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
	r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
				  rq, amdgpu_sched_jobs);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCN dec run queue.\n");
		return r;
	}

	ring = &adev->vcn.ring_enc[0];
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
	r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
				  rq, amdgpu_sched_jobs);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCN enc run queue.\n");
		return r;
	}

	return 0;
}

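/**
 * amdgpu_vcn_sw_fini - free VCN software state
 * @adev: amdgpu_device pointer
 *
 * Free the saved buffer, tear down the scheduler entities, the VCPU buffer
 * object and the rings, and release the firmware.
 */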
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i;

	kfree(adev->vcn.saved_bo);

	amd_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);

	amd_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);

	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
			      &adev->vcn.gpu_addr,
			      (void **)&adev->vcn.cpu_addr);

	amdgpu_ring_fini(&adev->vcn.ring_dec);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		amdgpu_ring_fini(&adev->vcn.ring_enc[i]);

	release_firmware(adev->vcn.fw);

	return 0;
}

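/**
 * amdgpu_vcn_suspend - save the VCPU buffer object contents
 * @adev: amdgpu_device pointer
 *
 * Cancel the idle work and copy the VCPU buffer object contents into a
 * system-memory backup so they can be restored by amdgpu_vcn_resume().
 */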
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return 0;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	adev->vcn.saved_bo = kmalloc(size, GFP_KERNEL);
	if (!adev->vcn.saved_bo)
		return -ENOMEM;

	memcpy_fromio(adev->vcn.saved_bo, ptr, size);

	return 0;
}

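/**
 * amdgpu_vcn_resume - restore the VCPU buffer object contents
 * @adev: amdgpu_device pointer
 *
 * Copy the saved contents back into the VCPU buffer object, or, if there
 * is no saved copy, reload the firmware image and clear the remainder.
 */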
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	if (adev->vcn.saved_bo != NULL) {
		memcpy_toio(ptr, adev->vcn.saved_bo, size);
		kfree(adev->vcn.saved_bo);
		adev->vcn.saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned offset;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
		memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
			    le32_to_cpu(hdr->ucode_size_bytes));
		size -= le32_to_cpu(hdr->ucode_size_bytes);
		ptr += le32_to_cpu(hdr->ucode_size_bytes);
		memset_io(ptr, 0, size);
	}

	return 0;
}

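/**
 * amdgpu_vcn_idle_work_handler - VCN idle handler
 * @work: pointer to the delayed work
 *
 * If no fences are outstanding on the decode ring, drop the VCN power/clocks
 * (through the UVD DPM helpers); otherwise re-schedule the idle work.
 */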
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);

	if (fences == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, false);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		}
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

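/**
 * amdgpu_vcn_ring_begin_use - power up VCN before use of a ring
 * @ring: ring about to be used
 *
 * Cancel the pending idle work; if no idle work was pending, bring the
 * VCN power/clocks back up before commands are submitted.
 */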
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, true);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
		}
	}
}

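/**
 * amdgpu_vcn_ring_end_use - schedule the idle work after use of a ring
 * @ring: ring that was used
 *
 * (Re-)schedule the delayed idle work to run after VCN_IDLE_TIMEOUT.
 */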
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

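/**
 * amdgpu_vcn_dec_ring_test_ring - basic decode ring test
 * @ring: decode ring to test
 *
 * Write a test value to the UVD_CONTEXT_ID register through the ring and
 * poll the register until the value shows up.
 * Returns 0 on success, -EINVAL on timeout.
 */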
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: vcn dec failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

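/**
 * amdgpu_vcn_dec_send_msg - submit a decoder message buffer to the VCPU
 * @ring: decode ring
 * @bo: buffer object containing the message
 * @direct: submit directly on the ring instead of through the scheduler
 * @fence: fence to return (optional)
 *
 * Reserve and validate the message buffer, then build a small IB that
 * passes the buffer address to the VCPU and submit it.
 */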
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
			       bool direct, struct dma_fence **fence)
{
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head head;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	struct amdgpu_device *adev = ring->adev;
	uint64_t addr;
	int i, r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
	if (r)
		return r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto err;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err_free;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &adev->vcn.entity_dec,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err_free;
	}

	ttm_eu_fence_buffer_objects(&ticket, &head, f);

	if (fence)
		*fence = dma_fence_get(f);
	amdgpu_bo_unref(&bo);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	ttm_eu_backoff_reservation(&ticket, &head);
	return r;
}

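/**
 * amdgpu_vcn_dec_get_create_msg - build and send a decoder create message
 * @ring: decode ring
 * @handle: session handle to use
 * @fence: fence to return (optional)
 */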
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
			     NULL, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_vcn_dec_send_msg(ring, bo, true, fence);
}

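/**
 * amdgpu_vcn_dec_get_destroy_msg - build and send a decoder destroy message
 * @ring: decode ring
 * @handle: session handle to destroy
 * @direct: submit directly on the ring instead of through the scheduler
 * @fence: fence to return (optional)
 */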
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
			     NULL, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence);
}

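/**
 * amdgpu_vcn_dec_ring_test_ib - decoder indirect buffer test
 * @ring: decode ring to test
 * @timeout: fence wait timeout in jiffies
 *
 * Send a create and a destroy message and wait for the resulting fence.
 */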
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}

	dma_fence_put(fence);

error:
	return r;
}

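/**
 * amdgpu_vcn_enc_ring_test_ring - basic encode ring test
 * @ring: encode ring to test
 *
 * Submit an END command and poll until the ring's read pointer advances.
 * Returns 0 on success, -ETIMEDOUT on timeout.
 */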
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

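/**
 * amdgpu_vcn_enc_get_create_msg - build and send an encoder open-session IB
 * @ring: encode ring
 * @handle: session handle to use
 * @fence: fence to return (optional)
 */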
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

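/**
 * amdgpu_vcn_enc_get_destroy_msg - build and send an encoder close-session IB
 * @ring: encode ring
 * @handle: session handle to close
 * @fence: fence to return (optional)
 */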
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

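/**
 * amdgpu_vcn_enc_ring_test_ib - encoder indirect buffer test
 * @ring: encode ring to test
 * @timeout: fence wait timeout in jiffies
 *
 * Send an open-session and a close-session IB and wait for the fence.
 */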
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}