/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
#define FIRMWARE_NAVI10		"amdgpu/navi10_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_NAVI10);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

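/**
 * amdgpu_vcn_sw_init - load VCN firmware and allocate its buffers
 *
 * @adev: amdgpu_device pointer
 *
 * Pick the firmware image for the current ASIC, request and validate
 * it, then allocate the VCPU buffer object (and the indirect SRAM BO
 * when DPG mode is used).
 */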
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	int r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->rev_id >= 8)
			fw_name = FIRMWARE_RAVEN2;
		else if (adev->pdev->device == 0x15d8)
			fw_name = FIRMWARE_PICASSO;
		else
			fw_name = FIRMWARE_RAVEN;
		break;
	case CHIP_NAVI10:
		fw_name = FIRMWARE_NAVI10;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* Bit 20-23, it is encode major and non-zero for new naming convention.
	 * This field is part of version minor and DRM_DISABLED_FLAG in old naming
	 * convention. Since the latest version minor is 0x5B and DRM_DISABLED_FLAG
	 * is zero in old naming convention, this field is always zero so far.
	 * These four bits are used to tell which naming convention is present.
	 */
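	/*
	 * Worked example (hypothetical version word, for illustration only):
	 * ucode_version = 0x1125200A decodes under the new convention as
	 * VEP 1 (bits 28-31), DEC 1 (bits 24-27), ENC 2.0x52 (bits 20-23
	 * for the major, bits 12-19 for the minor) and revision 0x00A
	 * (bits 0-11).
	 */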
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n",
			enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
			version_major, version_minor, family_id);
	}

	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
				    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
		return r;
	}

	if (adev->vcn.indirect_sram) {
		r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.dpg_sram_bo,
			    &adev->vcn.dpg_sram_gpu_addr, &adev->vcn.dpg_sram_cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate DPG bo\n", r);
			return r;
		}
	}

	return 0;
}

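/**
 * amdgpu_vcn_sw_fini - free VCN resources
 *
 * @adev: amdgpu_device pointer
 *
 * Free the buffer objects allocated at init time, tear down the
 * decode, encode and JPEG rings and release the firmware.
 */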
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i;

	kvfree(adev->vcn.saved_bo);

	if (adev->vcn.indirect_sram) {
		amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo,
			      &adev->vcn.dpg_sram_gpu_addr,
			      (void **)&adev->vcn.dpg_sram_cpu_addr);
	}

	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
			      &adev->vcn.gpu_addr,
			      (void **)&adev->vcn.cpu_addr);

	amdgpu_ring_fini(&adev->vcn.ring_dec);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		amdgpu_ring_fini(&adev->vcn.ring_enc[i]);

	amdgpu_ring_fini(&adev->vcn.ring_jpeg);

	release_firmware(adev->vcn.fw);

	return 0;
}

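/**
 * amdgpu_vcn_suspend - save the VCPU BO for suspend
 *
 * @adev: amdgpu_device pointer
 *
 * Cancel the pending idle work and copy the VCPU buffer object into
 * system memory so it can be restored on resume.
 */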
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (adev->vcn.vcpu_bo == NULL)
		return 0;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL);
	if (!adev->vcn.saved_bo)
		return -ENOMEM;

	memcpy_fromio(adev->vcn.saved_bo, ptr, size);

	return 0;
}

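/**
 * amdgpu_vcn_resume - restore the VCPU BO after resume
 *
 * @adev: amdgpu_device pointer
 *
 * Copy the saved image back into the VCPU buffer object; if there is
 * no saved image, reload the firmware code and clear the remainder of
 * the buffer.
 */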
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	if (adev->vcn.saved_bo != NULL) {
		memcpy_toio(ptr, adev->vcn.saved_bo, size);
		kvfree(adev->vcn.saved_bo);
		adev->vcn.saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned offset;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
			memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
				    le32_to_cpu(hdr->ucode_size_bytes));
			size -= le32_to_cpu(hdr->ucode_size_bytes);
			ptr += le32_to_cpu(hdr->ucode_size_bytes);
		}
		memset_io(ptr, 0, size);
	}

	return 0;
}

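/**
 * amdgpu_vcn_idle_work_handler - power off VCN when it goes idle
 *
 * @work: the pending idle work item
 *
 * Count the fences still outstanding on the VCN rings; gate the block
 * when there are none, otherwise reschedule the work item. In DPG
 * mode the pause state is also updated to match the current encode
 * and JPEG activity.
 */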
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0;
	unsigned int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		adev->vcn.pause_dpg_mode(adev, &new_state);
	}

	fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
	fences += amdgpu_fence_count_emitted(&adev->vcn.ring_dec);

	if (fences == 0) {
		amdgpu_gfx_off_ctrl(adev, true);
		if (adev->asic_type < CHIP_NAVI10 && adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

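/**
 * amdgpu_vcn_ring_begin_use - power up VCN before a submission
 *
 * @ring: the ring about to be used
 *
 * Ungate the block unless the idle work was still pending and, in DPG
 * mode, make sure the firmware is paused for the ring type being
 * submitted to.
 */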
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (set_clocks) {
		amdgpu_gfx_off_ctrl(adev, false);
		if (adev->asic_type < CHIP_NAVI10 && adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, true);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_UNGATE);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;
		unsigned int fences = 0;
		unsigned int i;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
		}
		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			new_state.jpeg = VCN_DPG_STATE__PAUSE;

		adev->vcn.pause_dpg_mode(adev, &new_state);
	}
}

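/**
 * amdgpu_vcn_ring_end_use - note that a submission is done
 *
 * @ring: the ring that was used
 *
 * (Re)arm the delayed idle work that powers the block back down.
 */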
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

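/**
 * amdgpu_vcn_dec_ring_test_ring - register write/read test via the decode ring
 *
 * @ring: the decode ring to test
 *
 * Write a token into a scratch register through the ring and poll the
 * register until the token shows up.
 */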
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(adev->vcn.external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

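/**
 * amdgpu_vcn_dec_send_msg - submit a decoder message buffer
 *
 * @ring: decode ring to submit to
 * @bo: reserved buffer object holding the message
 * @fence: optional fence to return
 *
 * Build a small IB that points the decoder at the message buffer and
 * submit it directly; the BO is fenced, unreserved and unreferenced
 * here on both the success and the error path.
 */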
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

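/**
 * amdgpu_vcn_dec_get_create_msg - construct and send a decoder create message
 *
 * @ring: decode ring to submit to
 * @handle: session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW testing.
 */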
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

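/**
 * amdgpu_vcn_dec_get_destroy_msg - construct and send a decoder destroy message
 *
 * @ring: decode ring to submit to
 * @handle: session handle to close
 * @fence: optional fence to return
 *
 * Close up a stream for HW testing.
 */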
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

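/**
 * amdgpu_vcn_dec_ring_test_ib - test decoder IB execution
 *
 * @ring: the decode ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Create and destroy a dummy session and wait for the resulting fence.
 */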
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

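/**
 * amdgpu_vcn_enc_ring_test_ring - test if the encode ring makes progress
 *
 * @ring: the encode ring to test
 *
 * Submit an END command and wait for the read pointer to move past
 * the point it was at before the submission.
 */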
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

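/**
 * amdgpu_vcn_enc_get_create_msg - construct and send an encoder create message
 *
 * @ring: encode ring to submit to
 * @handle: session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW testing.
 */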
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

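/**
 * amdgpu_vcn_enc_get_destroy_msg - construct and send an encoder destroy message
 *
 * @ring: encode ring to submit to
 * @handle: session handle to close
 * @fence: optional fence to return
 *
 * Close up a stream for HW testing.
 */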
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

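/**
 * amdgpu_vcn_enc_ring_test_ib - test encoder IB execution
 *
 * @ring: the encode ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Create and destroy a dummy session and wait for the resulting fence.
 */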
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	return r;
}

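/**
 * amdgpu_vcn_jpeg_ring_test_ring - register write/read test via the JPEG ring
 *
 * @ring: the JPEG ring to test
 *
 * Write a token into the JPEG pitch register through the ring and
 * poll the register until the token shows up.
 */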
int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(adev->vcn.external.jpeg_pitch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.jpeg_pitch, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.external.jpeg_pitch);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

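/**
 * amdgpu_vcn_jpeg_set_reg - write a register through a JPEG IB
 *
 * @ring: JPEG ring to submit to
 * @handle: session handle (unused here, kept for symmetry with the
 *	other test helpers)
 * @fence: fence to return
 *
 * Build and directly submit an IB that writes a token into the JPEG
 * pitch register.
 */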
static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
		struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	const unsigned ib_size_dw = 16;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	ib->ptr[0] = PACKETJ(adev->vcn.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0);
	ib->ptr[1] = 0xDEADBEEF;
	for (i = 2; i < 16; i += 2) {
		ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

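/**
 * amdgpu_vcn_jpeg_ring_test_ib - test JPEG IB execution
 *
 * @ring: the JPEG ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Write a token through an IB, wait for the fence, then poll the
 * pitch register until the token shows up.
 */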
int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	struct dma_fence *fence = NULL;
	long r = 0;

	r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto error;
	} else if (r < 0) {
		goto error;
	} else {
		r = 0;
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.external.jpeg_pitch);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	dma_fence_put(fence);
error:
	return r;
}