/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"amdgpu/bonaire_vce.bin"
#define FIRMWARE_KABINI	"amdgpu/kabini_vce.bin"
#define FIRMWARE_KAVERI	"amdgpu/kaveri_vce.bin"
#define FIRMWARE_HAWAII	"amdgpu/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"amdgpu/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_vce.bin"
#define FIRMWARE_VEGAM		"amdgpu/vegam_vce.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_vce.bin"
#define FIRMWARE_VEGA12		"amdgpu/vega12_vce.bin"
#define FIRMWARE_VEGA20		"amdgpu/vega20_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);
/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size for the firmware BO
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGAM:
		fw_name = FIRMWARE_VEGAM;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_VEGA12:
		fw_name = FIRMWARE_VEGA12;
		break;
	case CHIP_VEGA20:
		fw_name = FIRMWARE_VEGA20;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

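	/*
	 * The ucode_version field packs the firmware version as
	 * major (bits 20-31), minor (bits 8-19) and a binary ID (bits 0-7).
	 */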
	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
		version_major, version_minor, binary_id);
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vce.vcpu_bo,
				    &adev->vce.gpu_addr, &adev->vce.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

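	/*
	 * Create a kernel-side scheduler entity on the first VCE ring; it is
	 * used for the indirect submissions in amdgpu_vce_get_destroy_msg().
	 */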
	ring = &adev->vce.ring[0];
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCE run queue.\n");
		return r;
	}

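	/* Mark all session handle slots as free. */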
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
	mutex_init(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	drm_sched_entity_destroy(&adev->vce.ring[0].sched, &adev->vce.entity);

	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
		(void **)&adev->vce.cpu_addr);

	for (i = 0; i < adev->vce.num_rings; i++)
		amdgpu_ring_fini(&adev->vce.ring[i]);

	release_firmware(adev->vce.fw);
	mutex_destroy(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

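	/* Check whether any encode session handles are still in use. */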
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	cancel_delayed_work_sync(&adev->vce.idle_work);
	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

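	/*
	 * Re-upload the firmware image into the VCPU BO; VRAM contents are
	 * typically not preserved over suspend/resume.
	 */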
	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
		    adev->vce.fw->size - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);
	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * power off VCE when it's not used anymore
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);
	unsigned i, count = 0;

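	/* Count the fences emitted but not yet signaled on all VCE rings. */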
	for (i = 0; i < adev->vce.num_rings; i++)
		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);

	if (count == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
	}
}

/**
 * amdgpu_vce_ring_begin_use - power up VCE
 *
 * @ring: amdgpu ring
 *
 * Make sure VCE is powered up when we want to use it
 */
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	mutex_lock(&adev->vce.idle_mutex);
	set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
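	/*
	 * If no idle work was pending, VCE has been powered down in the
	 * meantime, so clocks and power gating have to be re-enabled.
	 */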
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_UNGATE);
		}
	}
	mutex_unlock(&adev->vce.idle_mutex);
}

/**
 * amdgpu_vce_ring_end_use - power VCE down
 *
 * @ring: amdgpu ring
 *
 * Schedule work to power VCE down again
 */
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
{
	if (!amdgpu_sriov_vf(ring->adev))
		schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		if (!handle || adev->vce.filp[i] != filp)
			continue;

		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

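	/* Point the dummy feedback buffer 1KB into the IB BO itself. */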
	dummy = ib->gpu_addr + 1024;

	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

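	/* The firmware major version lives in the top byte of fw_version. */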
	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = lower_32_bits(dummy);
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: submit the msg directly or through the scheduler entity
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000020; /* len */
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if no */
	ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (direct)
		r = amdgpu_job_submit_direct(job, ring, &f);
	else
		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Make sure that no BO crosses a 4GB boundary.
 */
static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
				  int lo, int hi, unsigned size, int32_t index)
{
	int64_t offset = ((uint64_t)size) * ((int64_t)index);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	unsigned i, fpfn, lpfn;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
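	/*
	 * Constrain the BO placement window so that the accessed range
	 * cannot end up crossing a 4GB boundary.
	 */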
	if (index >= 0) {
		addr += offset;
		fpfn = PAGE_ALIGN(offset) >> PAGE_SHIFT;
		lpfn = 0x100000000ULL >> PAGE_SHIFT;
	} else {
		fpfn = 0;
		lpfn = (0x100000000ULL - PAGE_ALIGN(offset)) >> PAGE_SHIFT;
	}

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	for (i = 0; i < bo->placement.num_placement; ++i) {
		bo->placements[i].fpfn = max(bo->placements[i].fpfn, fpfn);
		bo->placements[i].lpfn = bo->placements[i].lpfn ?
			min(bo->placements[i].lpfn, lpfn) : lpfn;
	}
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

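	/* An index of 0xffffffff means "no index"; use the start of the buffer. */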
	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	if ((addr + (uint64_t)size) >
	    (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
	amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));

	return 0;
}

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * when we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, uint32_t *allocated)
{
	unsigned i;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found, try to allocate a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated |= 1 << i;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	unsigned idx;
	int i, r = 0;

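	/* The VCE rings don't use a VM; buffer addresses are patched directly. */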
	p->job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

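	/* First pass: validate all referenced BOs and their placement. */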
	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 10,
						   idx + 9, 0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 12,
						   idx + 11, 0, 0);
			if (r)
				goto out;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
						   idx + 2, 0, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
						   tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
						   4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
						   idx + 2, 0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 8,
						   idx + 7, 0, 0);
			if (r)
				goto out;
			break;
		}

		idx += len / 4;
	}

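	/* Second pass: parse the commands and patch in the real buffer addresses. */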
	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: /* config extension */
		case 0x04000002: /* pic control */
		case 0x04000005: /* rate control */
		case 0x04000007: /* motion estimation */
		case 0x04000008: /* rdo */
		case 0x04000009: /* vui */
		case 0x05000002: /* auxiliary buffer */
		case 0x05000009: /* clock table */
			break;

		case 0x0500000c: /* hw config */
			switch (p->adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
			case CHIP_KAVERI:
			case CHIP_MULLINS:
#endif
			case CHIP_CARRIZO:
				break;
			default:
				r = -EINVAL;
				goto out;
			}
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 8, idx + 7,
						*size / 12, 0);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 */
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	int i, r = 0, idx = 0;

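	/* In VM mode only session create/destroy is tracked; no relocations are patched. */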
	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		default:
			break;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
		amdgpu_ib_free(p->adev, ib, NULL);
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @ib: the IB to execute
 * @vmid: the VMID assigned to the job (unused by VCE)
 * @ctx_switch: whether a context switch is needed (unused by VCE)
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
			     unsigned vmid, bool ctx_switch)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address to write the fence sequence number to
 * @seq: fence sequence number
 * @flags: fence flags
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r, timeout = adev->usec_timeout;

	/* skip ring test for sriov */
	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout for the fence wait, in jiffies
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	/* skip vce ring1/2 ib test for now, since it's not reliable */
	if (ring != &ring->adev->vce.ring[0])
		return 0;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}