/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS	"amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR		"amdgpu/renoir_vcn.bin"
#define FIRMWARE_NAVI10		"amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14		"amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12		"amdgpu/navi12_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

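/**
 * amdgpu_vcn_sw_init - software init of the VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Select, load and validate the firmware image for the detected ASIC,
 * then allocate the per-instance VCPU, DPG scratch and firmware shared
 * memory buffer objects.
 */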
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size, fw_shared_bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	int i, r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
	mutex_init(&adev->vcn.vcn_pg_lock);
	atomic_set(&adev->vcn.total_submission_cnt, 0);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
		atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->rev_id >= 8)
			fw_name = FIRMWARE_RAVEN2;
		else if (adev->pdev->device == 0x15d8)
			fw_name = FIRMWARE_PICASSO;
		else
			fw_name = FIRMWARE_RAVEN;
		break;
	case CHIP_ARCTURUS:
		fw_name = FIRMWARE_ARCTURUS;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_RENOIR:
		fw_name = FIRMWARE_RENOIR;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI10:
		fw_name = FIRMWARE_NAVI10;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI14:
		fw_name = FIRMWARE_NAVI14;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI12:
		fw_name = FIRMWARE_NAVI12;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* Bits 20-23 are the encode major version and are non-zero in the new
	 * naming convention. In the old naming convention this field was part
	 * of the version minor and DRM_DISABLED_FLAG. Since the latest version
	 * minor is 0x5B and DRM_DISABLED_FLAG is zero in the old convention,
	 * this field has always been zero so far, so these four bits tell
	 * which naming convention is in use.
	 */
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
			enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
			version_major, version_minor, family_id);
	}

	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
						&adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
			return r;
		}

		if (adev->vcn.indirect_sram) {
			r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
					AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
					&adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
			if (r) {
				dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
				return r;
			}
		}

		r = amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)),
				PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].fw_shared_bo,
				&adev->vcn.inst[i].fw_shared_gpu_addr, &adev->vcn.inst[i].fw_shared_cpu_addr);
		if (r) {
			dev_err(adev->dev, "VCN %d (%d) failed to allocate firmware shared bo\n", i, r);
			return r;
		}

		fw_shared_bo_size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
		adev->vcn.inst[i].saved_shm_bo = kvmalloc(fw_shared_bo_size, GFP_KERNEL);
	}

	return 0;
}

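/**
 * amdgpu_vcn_sw_fini - software fini of the VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Free the buffer objects and rings allocated in amdgpu_vcn_sw_init()
 * and release the firmware.
 */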
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i, j;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		kvfree(adev->vcn.inst[j].saved_shm_bo);
		amdgpu_bo_free_kernel(&adev->vcn.inst[j].fw_shared_bo,
					  &adev->vcn.inst[j].fw_shared_gpu_addr,
					  (void **)&adev->vcn.inst[j].fw_shared_cpu_addr);

		if (adev->vcn.indirect_sram) {
			amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
						  &adev->vcn.inst[j].dpg_sram_gpu_addr,
						  (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
		}
		kvfree(adev->vcn.inst[j].saved_bo);

		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
					  &adev->vcn.inst[j].gpu_addr,
					  (void **)&adev->vcn.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);

		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
	}

	release_firmware(adev->vcn.fw);
	mutex_destroy(&adev->vcn.vcn_pg_lock);

	return 0;
}

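/**
 * amdgpu_vcn_suspend - save VCN state before suspend
 *
 * @adev: amdgpu_device pointer
 *
 * Copy the VCPU and firmware shared buffer contents of each active
 * instance into CPU memory so they can be restored on resume.
 */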
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return 0;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
		if (!adev->vcn.inst[i].saved_bo)
			return -ENOMEM;

		memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);

		if (adev->vcn.inst[i].fw_shared_bo == NULL)
			return 0;

		if (!adev->vcn.inst[i].saved_shm_bo)
			return -ENOMEM;

		size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
		ptr = adev->vcn.inst[i].fw_shared_cpu_addr;

		memcpy_fromio(adev->vcn.inst[i].saved_shm_bo, ptr, size);
	}
	return 0;
}

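/**
 * amdgpu_vcn_resume - restore VCN state after resume
 *
 * @adev: amdgpu_device pointer
 *
 * Write the saved VCPU and firmware shared buffer contents back, or
 * reinitialize them from the firmware image if nothing was saved.
 */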
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		if (adev->vcn.inst[i].saved_bo != NULL) {
			memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
			kvfree(adev->vcn.inst[i].saved_bo);
			adev->vcn.inst[i].saved_bo = NULL;
		} else {
			const struct common_firmware_header *hdr;
			unsigned offset;

			hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
				memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
					    le32_to_cpu(hdr->ucode_size_bytes));
				size -= le32_to_cpu(hdr->ucode_size_bytes);
				ptr += le32_to_cpu(hdr->ucode_size_bytes);
			}
			memset_io(ptr, 0, size);
		}

		if (adev->vcn.inst[i].fw_shared_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
		ptr = adev->vcn.inst[i].fw_shared_cpu_addr;

		if (adev->vcn.inst[i].saved_shm_bo != NULL)
			memcpy_toio(ptr, adev->vcn.inst[i].saved_shm_bo, size);
		else
			memset_io(ptr, 0, size);
	}
	return 0;
}

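/**
 * amdgpu_vcn_idle_work_handler - power off VCN when idle
 *
 * @work: delayed work item
 *
 * Count the fences still outstanding on all rings; gate the VCN block
 * (and allow GFXOFF again) once everything is idle, otherwise adjust
 * the DPG pause state and reschedule the check.
 */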
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
	unsigned int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
		}

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			struct dpg_pause_state new_state;

			if (fence[j] ||
			    unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

			adev->vcn.pause_dpg_mode(adev, j, &new_state);
		}

		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
		fences += fence[j];
	}

	if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
		amdgpu_gfx_off_ctrl(adev, true);
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
		       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

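/**
 * amdgpu_vcn_ring_begin_use - power up VCN before a submission
 *
 * @ring: ring the submission will go to
 *
 * Cancel the pending idle work, ungate the VCN block and, when DPG is
 * supported, pick the pause state matching the pending workload.
 */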
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	atomic_inc(&adev->vcn.total_submission_cnt);
	cancel_delayed_work_sync(&adev->vcn.idle_work);

	mutex_lock(&adev->vcn.vcn_pg_lock);
	amdgpu_gfx_off_ctrl(adev, false);
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
	       AMD_PG_STATE_UNGATE);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
			atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		} else {
			unsigned int fences = 0;
			unsigned int i;

			for (i = 0; i < adev->vcn.num_enc_rings; ++i)
				fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);

			if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
		}

		adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
	}
	mutex_unlock(&adev->vcn.vcn_pg_lock);
}

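/**
 * amdgpu_vcn_ring_end_use - mark the end of a submission
 *
 * @ring: ring the submission went to
 *
 * Drop the submission counts taken in amdgpu_vcn_ring_begin_use() and
 * schedule the idle work that eventually powers the block down again.
 */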
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
	    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);

	atomic_dec(&ring->adev->vcn.total_submission_cnt);

	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

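/**
 * amdgpu_vcn_dec_ring_test_ring - register write/read test
 *
 * @ring: decode ring to test
 *
 * Poison the scratch register, ask the ring to write 0xDEADBEEF to it
 * and poll until the value reads back, or -ETIMEDOUT on failure.
 */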
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

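/**
 * amdgpu_vcn_dec_send_msg - pass a decoder message buffer to the ring
 *
 * @ring: decode ring to use
 * @bo: reserved buffer object holding the message
 * @fence: optional fence to return for the submission
 *
 * Build a small direct IB that points the firmware at @bo, submit it,
 * then fence, unreserve and unreference the buffer.
 */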
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64,
					AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

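/**
 * amdgpu_vcn_dec_get_create_msg - construct and send a session create message
 *
 * @ring: decode ring to use
 * @handle: session handle to create
 * @fence: optional fence to return for the submission
 */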
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

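/**
 * amdgpu_vcn_dec_get_destroy_msg - construct and send a session destroy message
 *
 * @ring: decode ring to use
 * @handle: session handle to destroy
 * @fence: optional fence to return for the submission
 */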
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

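/**
 * amdgpu_vcn_dec_ring_test_ib - test IB execution on the decode ring
 *
 * @ring: decode ring to test
 * @timeout: timeout in jiffies
 *
 * Send a create followed by a destroy message and wait for the fence
 * of the destroy message to signal.
 */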
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

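/**
 * amdgpu_vcn_enc_ring_test_ring - read pointer test
 *
 * @ring: encode ring to test
 *
 * Submit an END command and poll until the ring's read pointer moves;
 * skipped under SR-IOV.
 */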
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

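/**
 * amdgpu_vcn_enc_get_create_msg - build and submit an encoder session open IB
 *
 * @ring: encode ring to use
 * @handle: session handle to create
 * @bo: buffer object whose GPU address is written into the session info
 * @fence: optional fence to return for the submission
 */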
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct amdgpu_bo *bo,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
					AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

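/**
 * amdgpu_vcn_enc_get_destroy_msg - build and submit an encoder session close IB
 *
 * @ring: encode ring to use
 * @handle: session handle to destroy
 * @bo: buffer object whose GPU address is written into the session info
 * @fence: optional fence to return for the submission
 */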
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_bo *bo,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
					AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

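/**
 * amdgpu_vcn_enc_ring_test_ib - test IB execution on the encode ring
 *
 * @ring: encode ring to test
 * @timeout: timeout in jiffies
 *
 * Open and close an encoder session and wait for the fence of the
 * close message to signal.
 */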
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}