/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT_MS	1000
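/*
 * The idle work below powers VCE down once no fences have been emitted
 * for this long; amdgpu_vce_note_usage() rearms the timer on every use
 * of the block.
 */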

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_vce.bin"
#define FIRMWARE_KABINI	"radeon/kabini_vce.bin"
#define FIRMWARE_KAVERI	"radeon/kaveri_vce.bin"
#define FIRMWARE_HAWAII	"radeon/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size for the new BO
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
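	/*
	 * The 32-bit ucode version packs major/minor/binary id as 12/12/8
	 * bit fields, e.g. 0x02800202 decodes to version 40.2 with binary
	 * id 2, which is then repacked into fw_version as
	 * (40 << 24) | (2 << 16) | (2 << 8) = 0x28020200.
	 */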
	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
		version_major, version_minor, binary_id);
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

	/* allocate firmware, stack and heap BO */
	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &adev->vce.vcpu_bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_pin(adev->vce.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->vce.gpu_addr);
	amdgpu_bo_unreserve(adev->vce.vcpu_bo);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE bo pin failed\n", r);
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	if (adev->vce.vcpu_bo == NULL)
		return 0;

	amdgpu_bo_unref(&adev->vce.vcpu_bo);

	amdgpu_ring_fini(&adev->vce.ring[0]);
	amdgpu_ring_fini(&adev->vce.ring[1]);

	release_firmware(adev->vce.fw);

	return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

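	/*
	 * Only the ucode itself goes into the firmware BO; the common
	 * header at the start of the firmware file is skipped via its
	 * ucode array offset.
	 */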
	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy(cpu_addr, (adev->vce.fw->data) + offset,
	       (adev->vce.fw->size) - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);
	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it's not used anymore
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);

	if ((amdgpu_fence_count_emitted(&adev->vce.ring[0]) == 0) &&
	    (amdgpu_fence_count_emitted(&adev->vce.ring[1]) == 0)) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work,
				      msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
	}
}

/**
 * amdgpu_vce_note_usage - power up VCE
 *
 * @adev: amdgpu_device pointer
 *
 * Make sure VCE is powered up when we want to use it
 */
static void amdgpu_vce_note_usage(struct amdgpu_device *adev)
{
	bool streams_changed = false;
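	/*
	 * If the idle work was no longer pending, it may already have
	 * powered the block down in the meantime, so the clocks have to
	 * be brought back up before use.
	 */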
	bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);

	set_clocks &= schedule_delayed_work(&adev->vce.idle_work,
					    msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));

	if (adev->pm.dpm_enabled) {
		/* XXX figure out if the streams changed */
		streams_changed = false;
	}

	if (set_clocks || streams_changed) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
		}
	}
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		if (!handle || adev->vce.filp[i] != filp)
			continue;

		amdgpu_vce_note_usage(adev);

		r = amdgpu_vce_get_destroy_msg(ring, handle, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

static int amdgpu_vce_free_job(struct amdgpu_job *job)
{
	amdgpu_ib_free(job->adev, job->ibs);
	kfree(job->ibs);
	return 0;
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_ib *ib = NULL;
	struct fence *f = NULL;
	struct amdgpu_device *adev = ring->adev;
	uint64_t dummy;
	int i, r;

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib)
		return -ENOMEM;

	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		kfree(ib);
		return r;
	}

	dummy = ib->gpu_addr + 1024;

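	/*
	 * VCE messages are a sequence of commands; each command starts
	 * with its total length in bytes, followed by the command id and
	 * its payload dwords. The session command below is e.g. three
	 * dwords, hence the 0x0000000c (12 byte) length.
	 */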
	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = lower_32_bits(dummy);
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_vce_free_job,
						 AMDGPU_FENCE_OWNER_UNDEFINED,
						 &f);
	if (r)
		goto err;
	if (fence)
		*fence = fence_get(f);
	fence_put(f);
	if (amdgpu_enable_scheduler)
		return 0;
err:
	amdgpu_ib_free(adev, ib);
	kfree(ib);
	return r;
}

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       struct fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_ib *ib = NULL;
	struct fence *f = NULL;
	struct amdgpu_device *adev = ring->adev;
	uint64_t dummy;
	int i, r;

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib)
		return -ENOMEM;

	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib);
	if (r) {
		kfree(ib);
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		return r;
	}

	dummy = ib->gpu_addr + 1024;

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = lower_32_bits(dummy);
	ib->ptr[ib->length_dw++] = 0x00000001;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_vce_free_job,
						 AMDGPU_FENCE_OWNER_UNDEFINED,
						 &f);
	if (r)
		goto err;
	if (fence)
		*fence = fence_get(f);
	fence_put(f);
	if (amdgpu_enable_scheduler)
		return 0;
err:
	amdgpu_ib_free(adev, ib);
	kfree(ib);
	return r;
}

/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib_idx: indirect buffer to patch
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_ib *ib = &p->ibs[ib_idx];
	struct amdgpu_bo *bo;
	uint64_t addr;

	if (index == 0xffffffff)
		index = 0;

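	/*
	 * The GPU virtual address is split over two dwords in the command
	 * stream and may be offset by an array index, e.g. a lo dword of
	 * 0x1000, a hi dword of 0x1, size 0x100 and index 2 reference
	 * 0x100001000 + 2 * 0x100 = 0x100001200.
	 */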
	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	mapping = amdgpu_cs_find_mapping(p, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return -EINVAL;
	}

	if ((addr + (uint64_t)size) >
	    ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

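	/*
	 * Rebase from the virtual address to the BO's real GPU offset and
	 * drop the index offset again; the index was presumably only added
	 * above to check that the indexed element still fits inside the
	 * BO, while the patched pointer is the base of element zero.
	 */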
	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	ib->ptr[lo] = addr & 0xFFFFFFFF;
	ib->ptr[hi] = addr >> 32;

	return 0;
}

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, bool *allocated)
{
	unsigned i;

	*allocated = false;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found, try to allocate a new one */
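	/*
	 * atomic_cmpxchg() returns the previous slot value, so reading
	 * back zero means the free slot was claimed with our handle in a
	 * single atomic step.
	 */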
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated = true;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @ib_idx: indirect buffer to parse
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	bool destroyed = false;
	bool created = false;
	bool allocated = false;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	int i, r = 0, idx = 0;
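	/*
	 * Until a session command is parsed, size aliases the scratch
	 * value in tmp; afterwards it points at the per-session image
	 * size, so the buffer checks below use the real stream dimensions.
	 */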

	amdgpu_vce_note_usage(p->adev);

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		if (destroyed) {
			DRM_ERROR("No other command allowed after destroy!\n");
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0)
				return session_idx;
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x01000001: /* create */
			created = true;
			if (!allocated) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

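			/*
			 * Dwords 8 and 10 of the create command appear to
			 * hold the encoded width and height; keep a worst
			 * case image size, roughly w * h * 3/2 for 4:2:0
			 * data times a further factor of 8.
			 */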
			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: /* config extension */
		case 0x04000002: /* pic control */
		case 0x04000005: /* rate control */
		case 0x04000007: /* motion estimation */
		case 0x04000008: /* rdo */
		case 0x04000009: /* vui */
		case 0x05000002: /* auxiliary buffer */
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: /* destroy */
			destroyed = true;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated && !created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if ((!r && destroyed) || (r && allocated)) {
		/*
		 * The IB contains a destroy msg, or we allocated a handle
		 * and then hit an error; either way, free the handle again.
		 */
		for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
			atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
	}

	return r;
}

/**
 * amdgpu_vce_ring_emit_semaphore - emit a semaphore command
 *
 * @ring: engine to use
 * @semaphore: address of semaphore
 * @emit_wait: true=emit wait, false=emit signal
 */
bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
				    struct amdgpu_semaphore *semaphore,
				    bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;

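	/*
	 * The semaphore address must be 8 byte aligned and is split into
	 * two 20 bit fields: bits 3..22 in the first dword and bits 23..42
	 * in the second.
	 */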
	amdgpu_ring_write(ring, VCE_CMD_SEMAPHORE);
	amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
	amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
	amdgpu_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0));
	if (!emit_wait)
		amdgpu_ring_write(ring, VCE_CMD_END);

	return true;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @ib: the IB to execute
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address to write the fence sequence number to
 * @seq: sequence number to write
 * @flags: fence related flags
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_lock(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_unlock_commit(ring);

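	/*
	 * A single VCE_CMD_END is enough for the test: the read pointer
	 * moves past the saved rptr as soon as the engine has fetched the
	 * command.
	 */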
	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
{
	struct fence *fence = NULL;
	int r;

	/* skip vce ring1 ib test for now, since it's not reliable */
	if (ring == &ring->adev->vce.ring[1])
		return 0;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
		goto error;
	}

	r = amdgpu_vce_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy msg (%d).\n", r);
		goto error;
	}

	r = fence_wait(fence, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
	}
error:
	fence_put(fence);
	return r;
}