/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_vce.bin"
#define FIRMWARE_KABINI	"radeon/kabini_vce.bin"
#define FIRMWARE_KAVERI	"radeon/kaveri_vce.bin"
#define FIRMWARE_HAWAII	"radeon/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_vce.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);

MODULE_FIRMWARE(FIRMWARE_VEGA10);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size for the VCPU BO
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
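
	/*
	 * ucode_version layout: bits [31:20] major, [19:8] minor,
	 * [7:0] binary id. fw_version repacks these as
	 * major << 24 | minor << 16 | id << 8 so later code can compare
	 * versions numerically (e.g. the major version check in
	 * amdgpu_vce_get_create_msg()).
	 */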
	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %u.%u Binary ID: %u\n",
		 version_major, version_minor, binary_id);
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vce.vcpu_bo,
				    &adev->vce.gpu_addr, &adev->vce.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	ring = &adev->vce.ring[0];
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
	r = amd_sched_entity_init(&ring->sched, &adev->vce.entity,
				  rq, amdgpu_sched_jobs);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCE run queue.\n");
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
	mutex_init(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	amd_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);

	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
		(void **)&adev->vce.cpu_addr);

	for (i = 0; i < adev->vce.num_rings; i++)
		amdgpu_ring_fini(&adev->vce.ring[i]);

	release_firmware(adev->vce.fw);
	mutex_destroy(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_suspend - VCE suspend preparation
 *
 * @adev: amdgpu_device pointer
 *
 * Returns 0 when no encode sessions are open; otherwise cancels the
 * idle work and fails, since suspending running sessions isn't supported.
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	cancel_delayed_work_sync(&adev->vce.idle_work);
	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - re-upload VCE firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Copy the firmware image back into the VCPU BO after resume.
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
		    adev->vce.fw->size - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it's not used anymore
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);
	unsigned i, count = 0;

	if (amdgpu_sriov_vf(adev))
		return;

	for (i = 0; i < adev->vce.num_rings; i++)
		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);

	if (count == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						     AMD_PG_STATE_GATE);
			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						     AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
	}
}

/**
 * amdgpu_vce_ring_begin_use - power up VCE
 *
 * @ring: amdgpu ring
 *
 * Make sure VCE is powered up when we want to use it
 */
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	mutex_lock(&adev->vce.idle_mutex);
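	/*
	 * If no idle work was pending, the idle handler has already powered
	 * VCE down (or it was never powered up), so the clocks have to be
	 * brought back up here.
	 */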
	set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						     AMD_CG_STATE_UNGATE);
			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						     AMD_PG_STATE_UNGATE);
		}
	}
	mutex_unlock(&adev->vce.idle_mutex);
}

/**
 * amdgpu_vce_ring_end_use - power VCE down
 *
 * @ring: amdgpu ring
 *
 * Schedule work to power VCE down again
 */
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		if (!handle || adev->vce.filp[i] != filp)
			continue;

		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

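	/*
	 * Use scratch space inside the IB BO, past the message itself, as a
	 * dummy feedback buffer, so the test needs no extra allocation.
	 */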
	dummy = ib->gpu_addr + 1024;

	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

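	/* firmware with major version >= 52 expects a longer create command */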
	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: submit the msg directly or via the scheduler entity
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000020; /* len */
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, 0xffffffff if none */
	ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0xffffffff; /* 0xffffffff means no feedback needed, firmware skips it */
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err;
	}

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib_idx: indirect buffer to patch
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t addr;

	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	mapping = amdgpu_cs_find_mapping(p, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return -EINVAL;
	}

	if ((addr + (uint64_t)size) >
	    (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

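	/*
	 * Rebase the address onto the BO's current GPU offset, then strip
	 * the index * size offset again since the firmware re-applies it.
	 */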
	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
	amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));

	return 0;
}

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, uint32_t *allocated)
{
	unsigned i;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found, try to allocate a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated |= 1 << i;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @ib_idx: indirect buffer to parse
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	int i, r, idx = 0;

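	/* the parser patches all buffer addresses itself, so no VM is used */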
	p->job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

	r = amdgpu_cs_sysvm_access_required(p);
	if (r)
		return r;

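	/*
	 * A VCE command stream is a sequence of packets:
	 *   dw0: length in bytes, including the header (>= 8, multiple of 4)
	 *   dw1: command id
	 *   dw2+: command specific payload
	 * The first packet must be a session command.
	 */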
	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: /* config extension */
		case 0x04000002: /* pic control */
		case 0x04000005: /* rate control */
		case 0x04000007: /* motion estimation */
		case 0x04000008: /* rdo */
		case 0x04000009: /* vui */
		case 0x05000002: /* auxiliary buffer */
		case 0x05000009: /* clock table */
			break;

		case 0x0500000c: /* hw config */
			switch (p->adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
			case CHIP_KAVERI:
			case CHIP_MULLINS:
#endif
			case CHIP_CARRIZO:
				break;
			default:
				r = -EINVAL;
				goto out;
			}
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
 *
 * @p: parser context
 * @ib_idx: indirect buffer to parse
 */
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	int i, r = 0, idx = 0;

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		default:
			break;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
		amdgpu_ib_free(p->adev, ib, NULL);
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @ib: the IB to execute
 * @vm_id: virtual memory id, unused by VCE
 * @ctx_switch: context switch flag, unused by VCE
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
			     unsigned vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: GPU address to write the sequence number to
 * @seq: sequence number to write
 * @flags: fence flags, AMDGPU_FENCE_FLAG_64BIT is not supported
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r, timeout = adev->usec_timeout;

	/* workaround VCE ring test slow issue for sriov */
	if (amdgpu_sriov_vf(adev))
		timeout *= 10;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 * @timeout: how long to wait for the fence, in jiffies
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	/* skip vce ring1/2 ib test for now, since it's not reliable */
	if (ring != &ring->adev->vce.ring[0])
		return 0;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}