xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c (revision 2e35facf82bcdd9b9eb9129f4fb31127b79249ec)
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
#define FIRMWARE_NAVI10		"amdgpu/navi10_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_NAVI10);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

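/**
 * amdgpu_vcn_sw_init - initialize the VCN software state
 * @adev: amdgpu_device pointer
 *
 * Pick and load the firmware image for the detected ASIC, validate it,
 * report the firmware version, and allocate the VCPU buffer object
 * (plus the indirect SRAM BO when DPG is used).
 */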
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	int r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->rev_id >= 8)
			fw_name = FIRMWARE_RAVEN2;
		else if (adev->pdev->device == 0x15d8)
			fw_name = FIRMWARE_PICASSO;
		else
			fw_name = FIRMWARE_RAVEN;
		break;
	case CHIP_NAVI10:
		fw_name = FIRMWARE_NAVI10;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
	/* Bits 20-23 hold the encode major version and are non-zero in the
	 * new naming convention. In the old convention these bits belong to
	 * the version minor and DRM_DISABLED_FLAG fields. Since the latest
	 * version minor is 0x5B and DRM_DISABLED_FLAG is zero in the old
	 * convention, these four bits are always zero there, so they tell
	 * the two naming conventions apart. New convention layout:
	 *   [31:28] VEP | [27:24] DEC | [23:20] ENC major |
	 *   [19:12] ENC minor | [11:0] revision
	 */
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
			enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
			version_major, version_minor, family_id);
	}

	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
				    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
		return r;
	}

	if (adev->vcn.indirect_sram) {
		r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.dpg_sram_bo,
			    &adev->vcn.dpg_sram_gpu_addr, &adev->vcn.dpg_sram_cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate DPG bo\n", r);
			return r;
		}
	}

	return 0;
}

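/**
 * amdgpu_vcn_sw_fini - tear down the VCN software state
 * @adev: amdgpu_device pointer
 *
 * Free the saved firmware copy, the DPG SRAM and VCPU buffer objects,
 * finish all VCN rings and release the firmware.
 */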
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i;

	kvfree(adev->vcn.saved_bo);

	if (adev->vcn.indirect_sram) {
		amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo,
			      &adev->vcn.dpg_sram_gpu_addr,
			      (void **)&adev->vcn.dpg_sram_cpu_addr);
	}

	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
			      &adev->vcn.gpu_addr,
			      (void **)&adev->vcn.cpu_addr);

	amdgpu_ring_fini(&adev->vcn.ring_dec);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		amdgpu_ring_fini(&adev->vcn.ring_enc[i]);

	amdgpu_ring_fini(&adev->vcn.ring_jpeg);

	release_firmware(adev->vcn.fw);

	return 0;
}

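/**
 * amdgpu_vcn_suspend - save VCN state before suspend
 * @adev: amdgpu_device pointer
 *
 * Cancel the idle work and copy the contents of the VCPU buffer object
 * into system memory so they survive the loss of VRAM contents.
 */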
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (adev->vcn.vcpu_bo == NULL)
		return 0;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL);
	if (!adev->vcn.saved_bo)
		return -ENOMEM;

	memcpy_fromio(adev->vcn.saved_bo, ptr, size);

	return 0;
}

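/**
 * amdgpu_vcn_resume - restore VCN state after resume
 * @adev: amdgpu_device pointer
 *
 * Copy the saved VCPU image back into the buffer object, or, when no
 * saved copy exists, re-upload the firmware image (for non-PSP loading)
 * and zero the remainder of the BO.
 */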
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	if (adev->vcn.saved_bo != NULL) {
		memcpy_toio(ptr, adev->vcn.saved_bo, size);
		kvfree(adev->vcn.saved_bo);
		adev->vcn.saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned offset;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
			memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
				    le32_to_cpu(hdr->ucode_size_bytes));
			size -= le32_to_cpu(hdr->ucode_size_bytes);
			ptr += le32_to_cpu(hdr->ucode_size_bytes);
		}
		memset_io(ptr, 0, size);
	}

	return 0;
}

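/**
 * amdgpu_vcn_idle_work_handler - powergate VCN when it goes idle
 * @work: delayed work item
 *
 * Count the fences still outstanding on the VCN rings. With DPG, pause
 * or unpause the firmware and JPEG paths accordingly. If everything is
 * idle, gate the block; otherwise re-arm the delayed work.
 */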
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0;
	unsigned int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		adev->vcn.pause_dpg_mode(adev, &new_state);
	}

	fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
	fences += amdgpu_fence_count_emitted(&adev->vcn.ring_dec);

	if (fences == 0) {
		amdgpu_gfx_off_ctrl(adev, true);
		if (adev->asic_type < CHIP_NAVI10 && adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

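/**
 * amdgpu_vcn_ring_begin_use - power up VCN before ring use
 * @ring: ring about to be used
 *
 * Cancel the pending idle work; if none was queued the block may have
 * been gated, so re-enable clocks/powergating. With DPG, recompute the
 * pause state, forcing a pause for the ring type being used.
 */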
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (set_clocks) {
		amdgpu_gfx_off_ctrl(adev, false);
		if (adev->asic_type < CHIP_NAVI10 && adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, true);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_UNGATE);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;
		unsigned int fences = 0;
		unsigned int i;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
		}
		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			new_state.jpeg = VCN_DPG_STATE__PAUSE;

		adev->vcn.pause_dpg_mode(adev, &new_state);
	}
}

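/**
 * amdgpu_vcn_ring_end_use - schedule powergating after ring use
 * @ring: ring that was used
 *
 * Re-arm the idle handler to gate the block after VCN_IDLE_TIMEOUT.
 */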
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

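/**
 * amdgpu_vcn_dec_ring_test_ring - smoke test for the decode ring
 * @ring: VCN decode ring
 *
 * Write 0xCAFEDEAD to the scratch register, emit a command that writes
 * 0xDEADBEEF to it, and poll until the new value shows up or we time out.
 */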
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(adev->vcn.external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

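/**
 * amdgpu_vcn_dec_send_msg - submit a decoder message buffer
 * @ring: VCN decode ring
 * @bo: reserved buffer object holding the message
 * @fence: optional fence to return for the submission
 *
 * Build a small IB pointing the decoder at the message BO and submit it
 * directly; the BO is fenced, unreserved and unreferenced on all paths.
 */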
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

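/**
 * amdgpu_vcn_dec_get_create_msg - build a decoder session create message
 * @ring: VCN decode ring
 * @handle: session handle
 * @fence: optional fence to return
 *
 * Fill a BO with the "create session" message and hand it to
 * amdgpu_vcn_dec_send_msg().
 */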
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

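/**
 * amdgpu_vcn_dec_get_destroy_msg - build a decoder session destroy message
 * @ring: VCN decode ring
 * @handle: session handle
 * @fence: optional fence to return
 *
 * Fill a BO with the "destroy session" message and submit it.
 */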
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

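/**
 * amdgpu_vcn_dec_ring_test_ib - indirect buffer test for the decode ring
 * @ring: VCN decode ring
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Send a session create followed by a session destroy message and wait
 * for the destroy fence to signal.
 */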
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

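/**
 * amdgpu_vcn_enc_ring_test_ring - smoke test for an encode ring
 * @ring: VCN encode ring
 *
 * Record the read pointer, emit an END command, and poll until the read
 * pointer advances or we time out.
 */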
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

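/**
 * amdgpu_vcn_enc_get_create_msg - open an encoder session
 * @ring: VCN encode ring
 * @handle: session handle
 * @fence: optional fence to return
 *
 * Build and directly submit an IB carrying session info, task info and
 * an "op initialize" command.
 */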
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

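/**
 * amdgpu_vcn_enc_get_destroy_msg - close an encoder session
 * @ring: VCN encode ring
 * @handle: session handle
 * @fence: optional fence to return
 *
 * Build and directly submit an IB carrying session info, task info and
 * an "op close session" command.
 */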
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

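/**
 * amdgpu_vcn_enc_ring_test_ib - indirect buffer test for an encode ring
 * @ring: VCN encode ring
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Open and close an encoder session and wait for the close fence.
 */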
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	return r;
}

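/**
 * amdgpu_vcn_jpeg_ring_test_ring - smoke test for the JPEG ring
 * @ring: VCN JPEG ring
 *
 * Write 0xCAFEDEAD to the JPEG pitch register, emit a command writing
 * 0xDEADBEEF to it, and poll for the new value.
 */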
int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(adev->vcn.external.jpeg_pitch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.jpeg_pitch, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.external.jpeg_pitch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

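/**
 * amdgpu_vcn_jpeg_set_reg - write a register through a JPEG IB
 * @ring: VCN JPEG ring
 * @handle: session handle (unused here)
 * @fence: fence to return for the submission
 *
 * Submit an IB that writes 0xDEADBEEF to the JPEG pitch register,
 * padded out with type-6 NOP packets.
 */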
static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
		struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	const unsigned ib_size_dw = 16;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	ib->ptr[0] = PACKETJ(adev->vcn.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0);
	ib->ptr[1] = 0xDEADBEEF;
	for (i = 2; i < 16; i += 2) {
		ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

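/**
 * amdgpu_vcn_jpeg_ring_test_ib - indirect buffer test for the JPEG ring
 * @ring: VCN JPEG ring
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Submit a register-write IB, wait for its fence, then poll the JPEG
 * pitch register until the written value appears.
 */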
int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	struct dma_fence *fence = NULL;
	long r = 0;

	r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto error;
	} else if (r < 0) {
		goto error;
	} else {
		r = 0;
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.external.jpeg_pitch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	dma_fence_put(fence);
error:
	return r;
}