/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_vce.bin"
#define FIRMWARE_KABINI	"radeon/kabini_vce.bin"
#define FIRMWARE_KAVERI	"radeon/kaveri_vce.bin"
#define FIRMWARE_HAWAII	"radeon/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size of the firmware, stack and heap BO to allocate
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

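	/* the version is packed into ucode_version: major in bits 31:20,
	 * minor in bits 19:8 and the binary id in bits 7:0
	 */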
	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
		version_major, version_minor, binary_id);
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

	/* allocate firmware, stack and heap BO */

	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &adev->vce.vcpu_bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

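	/* keep the BO pinned in VRAM and record its GPU address */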
	r = amdgpu_bo_pin(adev->vce.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->vce.gpu_addr);
	amdgpu_bo_unreserve(adev->vce.vcpu_bo);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE bo pin failed\n", r);
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	if (adev->vce.vcpu_bo == NULL)
		return 0;

	amdgpu_bo_unref(&adev->vce.vcpu_bo);

	amdgpu_ring_fini(&adev->vce.ring[0]);
	amdgpu_ring_fini(&adev->vce.ring[1]);

	release_firmware(adev->vce.fw);

	return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

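	/* copy the firmware image, minus the common header, into the VCPU BO */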
	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy(cpu_addr, (adev->vce.fw->data) + offset,
		(adev->vce.fw->size) - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it is not used anymore
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);

	if ((amdgpu_fence_count_emitted(&adev->vce.ring[0]) == 0) &&
	    (amdgpu_fence_count_emitted(&adev->vce.ring[1]) == 0)) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work,
				      msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
	}
}

/**
 * amdgpu_vce_note_usage - power up VCE
 *
 * @adev: amdgpu_device pointer
 *
 * Make sure VCE is powered up when we want to use it
 */
static void amdgpu_vce_note_usage(struct amdgpu_device *adev)
{
	bool streams_changed = false;
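	/* if the idle work was still pending VCE is already powered up,
	 * otherwise the clocks need to be turned back on below
	 */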
	bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	set_clocks &= schedule_delayed_work(&adev->vce.idle_work,
					    msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));

	if (adev->pm.dpm_enabled) {
		/* XXX figure out if the streams changed */
		streams_changed = false;
	}

	if (set_clocks || streams_changed) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
		}
	}
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);
		if (!handle || adev->vce.filp[i] != filp)
			continue;

		amdgpu_vce_note_usage(adev);

		r = amdgpu_vce_get_destroy_msg(ring, handle, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

static int amdgpu_vce_free_job(struct amdgpu_job *job)
{
	amdgpu_ib_free(job->adev, job->ibs);
	kfree(job->ibs);
	return 0;
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_ib *ib = NULL;
	struct fence *f = NULL;
	struct amdgpu_device *adev = ring->adev;
	uint64_t dummy;
	int i, r;

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib)
		return -ENOMEM;
	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		kfree(ib);
		return r;
	}

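	/* point the feedback buffer at unused space in the IB, behind the
	 * message dwords
	 */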
	dummy = ib->gpu_addr + 1024;

	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_vce_free_job,
						 AMDGPU_FENCE_OWNER_UNDEFINED,
						 &f);
	if (r)
		goto err;
	if (fence)
		*fence = fence_get(f);
	fence_put(f);
	if (amdgpu_enable_scheduler)
		return 0;
err:
	amdgpu_ib_free(adev, ib);
	kfree(ib);
	return r;
}

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       struct fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_ib *ib = NULL;
	struct fence *f = NULL;
	struct amdgpu_device *adev = ring->adev;
	uint64_t dummy;
	int i, r;

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib)
		return -ENOMEM;

	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib);
	if (r) {
		kfree(ib);
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		return r;
	}

	dummy = ib->gpu_addr + 1024;

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x00000001;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_vce_free_job,
						 AMDGPU_FENCE_OWNER_UNDEFINED,
						 &f);
	if (r)
		goto err;
	if (fence)
		*fence = fence_get(f);
	fence_put(f);
	if (amdgpu_enable_scheduler)
		return 0;
err:
	amdgpu_ib_free(adev, ib);
	kfree(ib);
	return r;
}

/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib_idx: indirect buffer to patch
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_ib *ib = &p->ibs[ib_idx];
	struct amdgpu_bo *bo;
	uint64_t addr;

	if (index == 0xffffffff)
		index = 0;

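	/* assemble the 64-bit GPU virtual address from the low/high command
	 * dwords and step to the indexed element inside the buffer
	 */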
	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	mapping = amdgpu_cs_find_mapping(p, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return -EINVAL;
	}

	if ((addr + (uint64_t)size) >
	    ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

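	/* translate from GPU virtual to the BO's real GPU offset and strip
	 * the index offset again so the command is patched with the buffer's
	 * base address
	 */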
	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	ib->ptr[lo] = addr & 0xFFFFFFFF;
	ib->ptr[hi] = addr >> 32;

	return 0;
}

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, bool *allocated)
{
	unsigned i;

	*allocated = false;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found try to alloc a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated = true;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @ib_idx: index of the indirect buffer to parse
 *
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	bool destroyed = false;
	bool created = false;
	bool allocated = false;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	int i, r = 0, idx = 0;

	amdgpu_vce_note_usage(p->adev);

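	/* walk the IB packet by packet; each packet starts with its length
	 * in bytes followed by the command id
	 */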
	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		if (destroyed) {
			DRM_ERROR("No other command allowed after destroy!\n");
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: // session
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0)
				return session_idx;
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: // task info
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x01000001: // create
			created = true;
			if (!allocated) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: // config extension
		case 0x04000002: // pic control
		case 0x04000005: // rate control
		case 0x04000007: // motion estimation
		case 0x04000008: // rdo
		case 0x04000009: // vui
		case 0x05000002: // auxiliary buffer
			break;

		case 0x03000001: // encode
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: // destroy
			destroyed = true;
			break;

		case 0x05000001: // context buffer
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: // video bitstream buffer
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: // feedback buffer
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated && !created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if ((!r && destroyed) || (r && allocated)) {
		/*
		 * IB contains a destroy msg or we have allocated a
		 * handle and got an error, anyway free the handle
		 */
		for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
			atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
	}

	return r;
}

/**
 * amdgpu_vce_ring_emit_semaphore - emit a semaphore command
 *
 * @ring: engine to use
 * @semaphore: semaphore to emit commands for
 * @emit_wait: true=emit wait, false=emit signal
 *
 */
bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
				    struct amdgpu_semaphore *semaphore,
				    bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;

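	/* the semaphore address is 8-byte aligned and passed as two 20-bit
	 * chunks
	 */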
	amdgpu_ring_write(ring, VCE_CMD_SEMAPHORE);
	amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
	amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
	amdgpu_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0));
	if (!emit_wait)
		amdgpu_ring_write(ring, VCE_CMD_END);

	return true;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @ib: the IB to execute
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address to write the fence sequence number to
 * @seq: sequence number to write
 * @flags: fence flags
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
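	/* VCE fence writes are only 32 bits wide, so 64-bit sequence numbers
	 * are not supported
	 */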
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_lock(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
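	/* emit a single END command and wait for the read pointer to move
	 * past it
	 */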
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_unlock_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
{
	struct fence *fence = NULL;
	int r;

	/* skip vce ring1 ib test for now, since it's not reliable */
	if (ring == &ring->adev->vce.ring[1])
		return 0;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
		goto error;
	}

	r = amdgpu_vce_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
		goto error;
	}

	r = fence_wait(fence, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
	}
error:
	fence_put(fence);
	return r;
}