/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_vce.bin"
#define FIRMWARE_KABINI	"radeon/kabini_vce.bin"
#define FIRMWARE_KAVERI	"radeon/kaveri_vce.bin"
#define FIRMWARE_HAWAII	"radeon/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_vce.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_vce.bin"
#define FIRMWARE_VEGA12		"amdgpu/vega12_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size of the BO to allocate for the firmware
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_VEGA12:
		fw_name = FIRMWARE_VEGA12;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
		version_major, version_minor, binary_id);
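	/* pack major, minor and binary id into the fw_version encoding used by the driver */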
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vce.vcpu_bo,
				    &adev->vce.gpu_addr, &adev->vce.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	ring = &adev->vce.ring[0];
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
				  rq, amdgpu_sched_jobs, NULL);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCE run queue.\n");
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
	mutex_init(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	drm_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);

	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
		(void **)&adev->vce.cpu_addr);

	for (i = 0; i < adev->vce.num_rings; i++)
		amdgpu_ring_fini(&adev->vce.ring[i]);

	release_firmware(adev->vce.fw);
	mutex_destroy(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

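	/* check whether any encode sessions are still open */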
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	cancel_delayed_work_sync(&adev->vce.idle_work);
	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

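	/* copy the firmware image into the VCPU BO so the VCE block can boot from it */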
	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
		    adev->vce.fw->size - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it is not in use anymore
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);
	unsigned i, count = 0;

	for (i = 0; i < adev->vce.num_rings; i++)
		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);

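	/* power down only when no fences are outstanding on any VCE ring */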
	if (count == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
	}
}

/**
 * amdgpu_vce_ring_begin_use - power up VCE
 *
 * @ring: amdgpu ring
 *
 * Make sure VCE is powered up when we want to use it
 */
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	mutex_lock(&adev->vce.idle_mutex);
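	/* if the idle work was not pending, VCE may already be powered down */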
	set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_UNGATE);

		}
	}
	mutex_unlock(&adev->vce.idle_mutex);
}

/**
 * amdgpu_vce_ring_end_use - power VCE down
 *
 * @ring: amdgpu ring
 *
 * Schedule work to power VCE down again
 */
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
{
	if (!amdgpu_sriov_vf(ring->adev))
		schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		if (!handle || adev->vce.filp[i] != filp)
			continue;

		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

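	/* use the area right after the IB as a dummy feedback buffer */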
	dummy = ib->gpu_addr + 1024;

	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: submit the msg directly to the ring or go through the scheduler
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000020; /* len */
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if none */
	ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err;
	}

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Make sure that no BO crosses a 4GB boundary.
 */
static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
				  int lo, int hi, unsigned size, int32_t index)
{
	int64_t offset = ((uint64_t)size) * ((int64_t)index);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	unsigned i, fpfn, lpfn;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
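	/* restrict the BO placement so the referenced range cannot
	 * cross a 4GB boundary
	 */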
	if (index >= 0) {
		addr += offset;
		fpfn = PAGE_ALIGN(offset) >> PAGE_SHIFT;
		lpfn = 0x100000000ULL >> PAGE_SHIFT;
	} else {
		fpfn = 0;
		lpfn = (0x100000000ULL - PAGE_ALIGN(offset)) >> PAGE_SHIFT;
	}

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	for (i = 0; i < bo->placement.num_placement; ++i) {
		bo->placements[i].fpfn = max(bo->placements[i].fpfn, fpfn);
		bo->placements[i].lpfn = bo->placements[i].lpfn ?
			min(bo->placements[i].lpfn, lpfn) : lpfn;
	}
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	if ((addr + (uint64_t)size) >
	    (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

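	/* rebase the address from the VM mapping onto the BO's current GPU offset */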
	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
	amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));

	return 0;
}

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, uint32_t *allocated)
{
	unsigned i;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found, try to alloc a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated |= 1 << i;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 *
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	unsigned idx;
	int i, r = 0;

	p->job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

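	/* first pass: validate the placement of all referenced buffers;
	 * the second pass below patches relocations and tracks sessions
	 */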
	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 10,
						   idx + 9, 0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 12,
						   idx + 11, 0, 0);
			if (r)
				goto out;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
						   idx + 2, 0, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
						   tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
						   4096, fb_idx);
			if (r)
				goto out;
			break;
		}

		idx += len / 4;
	}

	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: /* config extension */
		case 0x04000002: /* pic control */
		case 0x04000005: /* rate control */
		case 0x04000007: /* motion estimation */
		case 0x04000008: /* rdo */
		case 0x04000009: /* vui */
		case 0x05000002: /* auxiliary buffer */
		case 0x05000009: /* clock table */
			break;

		case 0x0500000c: /* hw config */
			switch (p->adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
			case CHIP_KAVERI:
			case CHIP_MULLINS:
#endif
			case CHIP_CARRIZO:
				break;
			default:
				r = -EINVAL;
				goto out;
			}
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 *
 */
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	int i, r = 0, idx = 0;

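	/* buffers are addressed through the VM here, so only session
	 * create/destroy commands need to be tracked
	 */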
	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		default:
			break;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
		amdgpu_ib_free(p->adev, ib, NULL);
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @ib: the IB to execute
 * @vmid: virtual memory ID (ignored by VCE)
 * @ctx_switch: context switch flag (ignored by VCE)
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
			     unsigned vmid, bool ctx_switch)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address where the fence value is written
 * @seq: sequence number to write
 * @flags: fence flags
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r, timeout = adev->usec_timeout;

	/* skip ring test for sriov */
	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
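	/* submit a single END command and wait for the read pointer to advance */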
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 * @timeout: how long to wait for the test fence
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	/* skip vce ring1/2 ib test for now, since it's not reliable */
	if (ring != &ring->adev->vce.ring[0])
		return 0;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}