xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c (revision bbde9fc1824aab58bc78c084163007dd6c03fe5b)
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_vce.bin"
#define FIRMWARE_KABINI	"radeon/kabini_vce.bin"
#define FIRMWARE_KAVERI	"radeon/kaveri_vce.bin"
#define FIRMWARE_HAWAII	"radeon/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size of the firmware, stack and heap BO to allocate
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

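	/*
	 * Firmware version layout as decoded below: ucode_version packs the
	 * major version in bits [31:20], the minor version in bits [19:8]
	 * and the binary id in bits [7:0]; the driver then re-packs this as
	 * fw_version = major << 24 | minor << 16 | binary_id << 8.
	 */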
	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
		version_major, version_minor, binary_id);
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

	/* allocate firmware, stack and heap BO */

	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->vce.vcpu_bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_pin(adev->vce.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->vce.gpu_addr);
	amdgpu_bo_unreserve(adev->vce.vcpu_bo);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE bo pin failed\n", r);
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	if (adev->vce.vcpu_bo == NULL)
		return 0;

	amdgpu_bo_unref(&adev->vce.vcpu_bo);

	amdgpu_ring_fini(&adev->vce.ring[0]);
	amdgpu_ring_fini(&adev->vce.ring[1]);

	release_firmware(adev->vce.fw);

	return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

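	/*
	 * Copy the firmware image back into the pinned VCPU BO; the BO lives
	 * in VRAM, whose contents cannot be assumed to have survived suspend,
	 * so the upload is redone from the kernel's firmware blob here.
	 */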
	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy(cpu_addr, (adev->vce.fw->data) + offset,
		(adev->vce.fw->size) - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * power off VCE when it's not used any more
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);

	if ((amdgpu_fence_count_emitted(&adev->vce.ring[0]) == 0) &&
	    (amdgpu_fence_count_emitted(&adev->vce.ring[1]) == 0)) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work,
				      msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
	}
}

/**
 * amdgpu_vce_note_usage - power up VCE
 *
 * @adev: amdgpu_device pointer
 *
 * Make sure VCE is powered up when we want to use it
 */
static void amdgpu_vce_note_usage(struct amdgpu_device *adev)
{
	bool streams_changed = false;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	set_clocks &= schedule_delayed_work(&adev->vce.idle_work,
					    msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
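	/*
	 * set_clocks is only true when the idle work was not pending, i.e.
	 * the idle handler may already have powered VCE down (or was never
	 * scheduled), so clocks/power gating must be re-enabled below.
	 */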

	if (adev->pm.dpm_enabled) {
		/* XXX figure out if the streams changed */
		streams_changed = false;
	}

	if (set_clocks || streams_changed) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
		}
	}
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);
		if (!handle || adev->vce.filp[i] != filp)
			continue;

		amdgpu_vce_note_usage(adev);

		r = amdgpu_vce_get_destroy_msg(ring, handle, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct amdgpu_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_ib ib;
	uint64_t dummy;
	int i, r;

	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		return r;
	}

	dummy = ib.gpu_addr + 1024;

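	/*
	 * Each command packet below starts with its total length in bytes
	 * followed by a command id; "dummy" points 1KB into the IB itself
	 * and serves as a throw-away feedback buffer address for this test
	 * message.
	 */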
	/* stitch together a VCE create msg */
	ib.length_dw = 0;
	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
	ib.ptr[ib.length_dw++] = handle;

	ib.ptr[ib.length_dw++] = 0x00000030; /* len */
	ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */
	ib.ptr[ib.length_dw++] = 0x00000000;
	ib.ptr[ib.length_dw++] = 0x00000042;
	ib.ptr[ib.length_dw++] = 0x0000000a;
	ib.ptr[ib.length_dw++] = 0x00000001;
	ib.ptr[ib.length_dw++] = 0x00000080;
	ib.ptr[ib.length_dw++] = 0x00000060;
	ib.ptr[ib.length_dw++] = 0x00000100;
	ib.ptr[ib.length_dw++] = 0x00000100;
	ib.ptr[ib.length_dw++] = 0x0000000c;
	ib.ptr[ib.length_dw++] = 0x00000000;

	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
	ib.ptr[ib.length_dw++] = dummy;
	ib.ptr[ib.length_dw++] = 0x00000001;

	for (i = ib.length_dw; i < ib_size_dw; ++i)
		ib.ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
	if (r) {
		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
	}

	if (fence)
		*fence = amdgpu_fence_ref(ib.fence);

	amdgpu_ib_free(ring->adev, &ib);

	return r;
}

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       struct amdgpu_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_ib ib;
	uint64_t dummy;
	int i, r;

	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		return r;
	}

	dummy = ib.gpu_addr + 1024;

	/* stitch together a VCE destroy msg */
	ib.length_dw = 0;
	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
	ib.ptr[ib.length_dw++] = handle;

	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
	ib.ptr[ib.length_dw++] = dummy;
	ib.ptr[ib.length_dw++] = 0x00000001;

	ib.ptr[ib.length_dw++] = 0x00000008; /* len */
	ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib.length_dw; i < ib_size_dw; ++i)
		ib.ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
	if (r) {
		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
	}

	if (fence)
		*fence = amdgpu_fence_ref(ib.fence);

	amdgpu_ib_free(ring->adev, &ib);

	return r;
}

/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib_idx: indirect buffer to patch
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_ib *ib = &p->ibs[ib_idx];
	struct amdgpu_bo *bo;
	uint64_t addr;

	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

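	/*
	 * Look up the BO mapping that covers the address taken from the
	 * command stream, bounds-check it against the requested size, and
	 * patch the lo/hi dwords with the BO's real GPU offset below.
	 */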
	mapping = amdgpu_cs_find_mapping(p, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return -EINVAL;
	}

	if ((addr + (uint64_t)size) >
	    ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	ib->ptr[lo] = addr & 0xFFFFFFFF;
	ib->ptr[hi] = addr >> 32;

	return 0;
}

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, bool *allocated)
{
	unsigned i;

	*allocated = false;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found, try to alloc a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated = true;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @ib_idx: index of the indirect buffer to parse
 *
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	bool destroyed = false;
	bool created = false;
	bool allocated = false;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	int i, r = 0, idx = 0;

	amdgpu_vce_note_usage(p->adev);

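	/*
	 * The IB is a sequence of packets, each starting with a length in
	 * bytes and a command id; idx advances by len/4 dwords per packet.
	 * A session command must come first so that buffer sizes can be
	 * checked against the session's image size.
	 */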
	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		if (destroyed) {
			DRM_ERROR("No other command allowed after destroy!\n");
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: // session
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0)
				return session_idx;
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: // task info
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x01000001: // create
			created = true;
			if (!allocated) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

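			/*
			 * Remember the image size for this session; the 3/2
			 * factor presumably accounts for the NV12 luma plane
			 * plus the two quarter-size chroma planes (assumption,
			 * not spelled out in the code).
			 */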
			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: // config extension
		case 0x04000002: // pic control
		case 0x04000005: // rate control
		case 0x04000007: // motion estimation
		case 0x04000008: // rdo
		case 0x04000009: // vui
		case 0x05000002: // auxiliary buffer
			break;

		case 0x03000001: // encode
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: // destroy
			destroyed = true;
			break;

		case 0x05000001: // context buffer
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: // video bitstream buffer
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: // feedback buffer
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated && !created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if ((!r && destroyed) || (r && allocated)) {
		/*
		 * IB contains a destroy msg or we have allocated a
		 * handle and got an error, either way free the handle
		 */
		for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
			atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
	}

	return r;
}

/**
 * amdgpu_vce_ring_emit_semaphore - emit a semaphore command
 *
 * @ring: engine to use
 * @semaphore: address of semaphore
 * @emit_wait: true=emit wait, false=emit signal
 *
 */
bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
				    struct amdgpu_semaphore *semaphore,
				    bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;

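	/*
	 * The semaphore address is programmed as two 20-bit chunks of the
	 * 8-byte aligned GPU address (bits 22:3 and 42:23).
	 */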
	amdgpu_ring_write(ring, VCE_CMD_SEMAPHORE);
	amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
	amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
	amdgpu_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0));
	if (!emit_wait)
		amdgpu_ring_write(ring, VCE_CMD_END);

	return true;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @ib: the IB to execute
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address to write the fence value to
 * @seq: fence sequence number
 * @flags: fence flags
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

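	/*
	 * The sequence number is written as a single dword to the fence
	 * address (hence the WARN_ON for 64-bit fences above), followed by
	 * a trap to notify the host.
	 */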
	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_lock(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
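	/*
	 * VCE_CMD_END just marks the end of the command stream; the test
	 * only checks that the read pointer moves past the snapshot taken
	 * above within the timeout.
	 */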
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_unlock_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
{
	struct amdgpu_fence *fence = NULL;
	int r;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
		goto error;
	}

	r = amdgpu_vce_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
		goto error;
	}

	r = amdgpu_fence_wait(fence, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
	}
error:
	amdgpu_fence_unref(&fence);
	return r;
}