/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_vce.bin"
#define FIRMWARE_KABINI	"radeon/kabini_vce.bin"
#define FIRMWARE_KAVERI	"radeon/kaveri_vce.bin"
#define FIRMWARE_HAWAII	"radeon/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size in bytes of the firmware, stack and heap BO
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

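	/* The ucode version packs major (bits 31:20), minor (19:8) and
	 * binary ID (7:0); repack it as major.minor.id into fw_version.
	 */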
	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %u.%u Binary ID: %u\n",
		version_major, version_minor, binary_id);
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

	/* allocate firmware, stack and heap BO */

	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
			     NULL, NULL, &adev->vce.vcpu_bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_pin(adev->vce.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->vce.gpu_addr);
	amdgpu_bo_unreserve(adev->vce.vcpu_bo);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE bo pin failed\n", r);
		return r;
	}

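	/* Set up a kernel-owned scheduler entity on ring 0; it carries
	 * delayed destroy submissions, e.g. when cleaning up handles that
	 * userspace left open.
	 */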
	ring = &adev->vce.ring[0];
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
	r = amd_sched_entity_init(&ring->sched, &adev->vce.entity,
				  rq, amdgpu_sched_jobs);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCE run queue.\n");
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
	mutex_init(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	amd_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);

	amdgpu_bo_unref(&adev->vce.vcpu_bo);

	for (i = 0; i < adev->vce.num_rings; i++)
		amdgpu_ring_fini(&adev->vce.ring[i]);

	release_firmware(adev->vce.fw);
	mutex_destroy(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_suspend - prepare VCE for suspend
 *
 * @adev: amdgpu_device pointer
 *
 * Cancel the idle work; suspend is refused while encode sessions
 * are still open.
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

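	/* a non-zero handle means an encode session is still open */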
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	cancel_delayed_work_sync(&adev->vce.idle_work);
	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - re-upload VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 * Copy the firmware image back into the pinned VCPU BO after resume.
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

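	/* VRAM contents may have been lost while suspended; re-upload the
	 * firmware image (minus its header) into the VCPU BO.
	 */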
	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
		    adev->vce.fw->size - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it's not used anymore
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);
	unsigned i, count = 0;

	for (i = 0; i < adev->vce.num_rings; i++)
		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);

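	/* nothing left on the rings: power VCE down, via DPM when
	 * available, otherwise by gating the clocks directly; else
	 * re-check after another timeout interval
	 */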
	if (count == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
	}
}

/**
 * amdgpu_vce_ring_begin_use - power up VCE
 *
 * @ring: amdgpu ring
 *
 * Make sure VCE is powered up when we want to use it
 */
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

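	/* If no idle work was pending, the idle handler may already have
	 * powered VCE down, so the clocks have to be brought back up.
	 */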
	mutex_lock(&adev->vce.idle_mutex);
	set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
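			/* no DPM: raise the clocks by hand; the values are
			 * in 10 kHz units, i.e. 533 MHz evclk, 400 MHz ecclk
			 */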
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
		}
	}
	mutex_unlock(&adev->vce.idle_mutex);
}

/**
 * amdgpu_vce_ring_end_use - power VCE down
 *
 * @ring: amdgpu ring
 *
 * Schedule work to power VCE down again
 */
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		if (!handle || adev->vce.filp[i] != filp)
			continue;

		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	dummy = ib->gpu_addr + 1024;

	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

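	/* firmware with major version >= 52 expects a longer create
	 * command, padded with four extra dwords
	 */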
	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = lower_32_bits(dummy);
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: submit directly to the ring or via the scheduler entity
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000020; /* len */
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if none */
	ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

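	/* direct submission writes straight to the ring (used by the IB
	 * test); otherwise the job goes through the kernel VCE entity so
	 * the destroy runs after already-queued work
	 */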
	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err;
	}

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib_idx: indirect buffer to patch
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index, 0xffffffff if unused
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t addr;

	if (index == 0xffffffff)
		index = 0;

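	/* rebuild the 64-bit address from the lo/hi dwords in the command
	 * stream; for buffer arrays, step forward by index * size
	 */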
	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	mapping = amdgpu_cs_find_mapping(p, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return -EINVAL;
	}

	if ((addr + (uint64_t)size) >
	    ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
	amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));

	return 0;
}

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: bitmask of newly allocated session slots
 *
 * Validates the handle and returns the found session index or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, uint32_t *allocated)
{
	unsigned i;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found, try to allocate a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated |= 1 << i;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @ib_idx: indirect buffer to parse
 *
 * Validate the VCE command stream, patching buffer addresses in place.
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	int i, r, idx = 0;

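	/* this parser runs without a VM: buffer addresses are patched
	 * straight into the IB, so the job executes on physical addresses
	 */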
	p->job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

	r = amdgpu_cs_sysvm_access_required(p);
	if (r)
		return r;

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: /* config extension */
		case 0x04000002: /* pic control */
		case 0x04000005: /* rate control */
		case 0x04000007: /* motion estimation */
		case 0x04000008: /* rdo */
		case 0x04000009: /* vui */
		case 0x05000002: /* auxiliary buffer */
		case 0x05000009: /* clock table */
			break;

		case 0x0500000c: /* hw config */
			switch (p->adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
			case CHIP_KAVERI:
			case CHIP_MULLINS:
#endif
			case CHIP_CARRIZO:
				break;
			default:
				r = -EINVAL;
				goto out;
			}
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
 *
 * @p: parser context
 * @ib_idx: indirect buffer to parse
 *
 * Only track session handles; in VM mode the buffer addresses need
 * no patching.
 */
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	int i, r = 0, idx = 0;

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		default:
			break;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
		amdgpu_ib_free(p->adev, ib, NULL);
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @ib: the IB to execute
 * @vm_id: unused by VCE
 * @ctx_switch: unused by VCE
 *
 * Write the IB execute command and buffer address to the ring.
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
			     unsigned vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: GPU address to write the fence value to
 * @seq: sequence number to write
 * @flags: fence flags, 64-bit fences are not supported
 *
 * Write a fence followed by a trap command to the ring.
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 * Submit an END command and check that the read pointer advances.
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

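	/* a single END packet is enough; the test passes once the read
	 * pointer moves past the packet
	 */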
	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Create and destroy a dummy session and wait for the resulting fence.
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	/* skip vce ring1/2 ib test for now, since it's not reliable */
	if (ring != &ring->adev->vce.ring[0])
		return 0;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}