/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_uvd.h"
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_uvd.bin"
#define FIRMWARE_KABINI		"radeon/kabini_uvd.bin"
#define FIRMWARE_KAVERI		"radeon/kaveri_uvd.bin"
#define FIRMWARE_HAWAII		"radeon/hawaii_uvd.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_uvd.bin"

/**
 * amdgpu_uvd_cs_ctx - Command submission parser context
 *
 * Used for emulating virtual memory support on UVD 4.2.
 */
struct amdgpu_uvd_cs_ctx {
	struct amdgpu_cs_parser *parser;
	unsigned reg, count;
	unsigned data0, data1;
	unsigned idx;
	unsigned ib_idx;

	/* does the IB have a msg command? */
	bool has_msg_cmd;

	/* minimum buffer sizes */
	unsigned *buf_sizes;
};

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);

static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
static void amdgpu_uvd_idle_work_handler(struct work_struct *work);

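/**
 * amdgpu_uvd_sw_init - UVD software init
 *
 * @adev: amdgpu_device pointer
 *
 * Request and validate the matching UVD firmware for the current ASIC,
 * allocate, pin and map the VCPU buffer object in VRAM, clear the
 * session handle bookkeeping and set up the idle work handler.
 * Returns 0 on success, negative error code on failure.
 */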
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned version_major, version_minor, family_id;
	int i, r;

	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->uvd.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->uvd.fw);
		adev->uvd.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
	DRM_INFO("Found UVD firmware Version: %u.%u Family ID: %u\n",
		version_major, version_minor, family_id);

	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
		  + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &adev->uvd.vcpu_bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
	if (r) {
		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve UVD bo\n", r);
		return r;
	}

	r = amdgpu_bo_pin(adev->uvd.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->uvd.gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
		dev_err(adev->dev, "(%d) UVD bo pin failed\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->uvd.vcpu_bo, &adev->uvd.cpu_addr);
	if (r) {
		/* don't leak the pinned and reserved bo on error */
		amdgpu_bo_unpin(adev->uvd.vcpu_bo);
		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
		dev_err(adev->dev, "(%d) UVD map failed\n", r);
		return r;
	}

	amdgpu_bo_unreserve(adev->uvd.vcpu_bo);

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		atomic_set(&adev->uvd.handles[i], 0);
		adev->uvd.filp[i] = NULL;
	}

	/* from UVD v5.0 onward, HW addressing capacity increased to 64 bits */
	if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
		adev->uvd.address_64_bit = true;

	return 0;
}

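/**
 * amdgpu_uvd_sw_fini - UVD software fini
 *
 * @adev: amdgpu_device pointer
 *
 * Unmap, unpin and free the VCPU buffer object, tear down the UVD ring
 * and release the firmware. Safe to call even if init never completed.
 */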
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->uvd.vcpu_bo == NULL)
		return 0;

	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
	if (!r) {
		amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
		amdgpu_bo_unpin(adev->uvd.vcpu_bo);
		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
	}

	amdgpu_bo_unref(&adev->uvd.vcpu_bo);

	amdgpu_ring_fini(&adev->uvd.ring);

	release_firmware(adev->uvd.fw);

	return 0;
}

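/**
 * amdgpu_uvd_suspend - close open UVD sessions before suspend
 *
 * @adev: amdgpu_device pointer
 *
 * Send a destroy message for every still open session handle and wait
 * for the resulting fences, so the VCPU is idle before the hardware
 * gets powered down.
 */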
int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	int i, r;

	if (adev->uvd.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
		if (handle != 0) {
			struct fence *fence;

			amdgpu_uvd_note_usage(adev);

			r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			fence_wait(fence, false);
			fence_put(fence);

			adev->uvd.filp[i] = NULL;
			atomic_set(&adev->uvd.handles[i], 0);
		}
	}

	return 0;
}

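/**
 * amdgpu_uvd_resume - reload the UVD firmware image
 *
 * @adev: amdgpu_device pointer
 *
 * Copy the firmware back into the pinned VCPU buffer and clear the
 * stack and heap area behind it, since the VRAM content may have been
 * lost over suspend.
 */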
int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	const struct common_firmware_header *hdr;
	unsigned offset;

	if (adev->uvd.vcpu_bo == NULL)
		return -EINVAL;

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
		(adev->uvd.fw->size) - offset);

	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
	size -= le32_to_cpu(hdr->ucode_size_bytes);
	ptr = adev->uvd.cpu_addr;
	ptr += le32_to_cpu(hdr->ucode_size_bytes);

	memset(ptr, 0, size);

	return 0;
}

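/**
 * amdgpu_uvd_free_handles - close sessions belonging to a DRM file
 *
 * @adev: amdgpu_device pointer
 * @filp: DRM file the sessions were opened with
 *
 * Called on file close; sends a destroy message for every session
 * still open through this file and frees up the handle slots.
 */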
void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
		if (handle != 0 && adev->uvd.filp[i] == filp) {
			struct fence *fence;

			amdgpu_uvd_note_usage(adev);

			r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			fence_wait(fence, false);
			fence_put(fence);

			adev->uvd.filp[i] = NULL;
			atomic_set(&adev->uvd.handles[i], 0);
		}
	}
}

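/**
 * amdgpu_uvd_force_into_uvd_segment - restrict BO placement
 *
 * @rbo: buffer object to restrict
 *
 * Clamp all placements to the first 256MB, since hardware before
 * UVD v5.0 can only address buffers inside a 256MB segment.
 */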
static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *rbo)
{
	int i;
	for (i = 0; i < rbo->placement.num_placement; ++i) {
		rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
		rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
	}
}

/**
 * amdgpu_uvd_cs_pass1 - first parsing round
 *
 * @ctx: UVD parser context
 *
 * Make sure UVD message and feedback buffers are in VRAM and
 * that nobody is violating a 256MB boundary.
 */
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd, lo, hi;
	uint64_t addr;
	int r = 0;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		/* check if it's a message or feedback command */
		cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
		if (cmd == 0x0 || cmd == 0x3) {
			/* yes, force it into VRAM */
			uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
			amdgpu_ttm_placement_from_domain(bo, domain);
		}
		amdgpu_uvd_force_into_uvd_segment(bo);

		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	}

	return r;
}

/**
 * amdgpu_uvd_cs_msg_decode - handle UVD decode message
 *
 * @msg: pointer to message structure
 * @buf_sizes: returned buffer sizes
 *
 * Peek into the decode message and calculate the necessary buffer sizes.
 */
static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];
	unsigned level = msg[57];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);
	unsigned fs_in_mb = width_in_mb * height_in_mb;

	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
	unsigned min_ctx_size = 0;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */
	case 7: /* H264 Perf */
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	case 16: /* H265 */
		image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
		image_size = ALIGN(image_size, 256);

		num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
		min_dpb_size = image_size * num_dpb_buffer;
		min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
			       * 16 * num_dpb_buffer + 52 * 1024;
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	buf_sizes[0x4] = min_ctx_size;
	return 0;
}

/**
 * amdgpu_uvd_cs_msg - handle UVD message
 *
 * @ctx: UVD parser context
 * @bo: buffer object containing the message
 * @offset: offset into the buffer object
 *
 * Peek into the UVD message and extract the session id.
 * Make sure that we don't open up too many sessions.
 */
static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
			     struct amdgpu_bo *bo, unsigned offset)
{
	struct amdgpu_device *adev = ctx->parser->adev;
	int32_t *msg, msg_type, handle;
	void *ptr;
	long r;
	int i;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		/* unmap again, we are done with the message */
		amdgpu_bo_kunmap(bo);
		DRM_ERROR("Invalid UVD handle!\n");
		return -EINVAL;
	}

	switch (msg_type) {
	case 0:
		/* it's a create msg, nothing more to parse */
		amdgpu_bo_kunmap(bo);

		/* try to alloc a new handle */
		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
			if (atomic_read(&adev->uvd.handles[i]) == handle) {
				DRM_ERROR("Handle 0x%x already in use!\n", handle);
				return -EINVAL;
			}

			if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
				adev->uvd.filp[i] = ctx->parser->filp;
				return 0;
			}
		}

		DRM_ERROR("No more free UVD handles!\n");
		return -EINVAL;

	case 1:
		/* it's a decode msg, calc buffer sizes */
		r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes);
		amdgpu_bo_kunmap(bo);
		if (r)
			return r;

		/* validate the handle */
		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
			if (atomic_read(&adev->uvd.handles[i]) == handle) {
				if (adev->uvd.filp[i] != ctx->parser->filp) {
					DRM_ERROR("UVD handle collision detected!\n");
					return -EINVAL;
				}
				return 0;
			}
		}

		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
		return -ENOENT;

	case 2:
		/* it's a destroy msg, free the handle */
		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
		amdgpu_bo_kunmap(bo);
		return 0;

	default:
		amdgpu_bo_kunmap(bo);
		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
		return -EINVAL;
	}
	BUG();
	return -EINVAL;
}

/**
 * amdgpu_uvd_cs_pass2 - second parsing round
 *
 * @ctx: UVD parser context
 *
 * Patch buffer addresses, make sure buffer sizes are correct.
 */
static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	struct amdgpu_ib *ib;
	uint32_t cmd, lo, hi;
	uint64_t start, end;
	uint64_t addr;
	int r;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
	if (mapping == NULL)
		return -EINVAL;

	start = amdgpu_bo_gpu_offset(bo);

	end = (mapping->it.last + 1 - mapping->it.start);
	end = end * AMDGPU_GPU_PAGE_SIZE + start;

	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
	start += addr;

	ib = &ctx->parser->ibs[ctx->ib_idx];
	ib->ptr[ctx->data0] = start & 0xFFFFFFFF;
	ib->ptr[ctx->data1] = start >> 32;

	cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
	if (cmd < 0x4) {
		if ((end - start) < ctx->buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd == 0x206) {
		if ((end - start) < ctx->buf_sizes[4]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[4]);
			return -EINVAL;
		}
	} else if ((cmd != 0x100) && (cmd != 0x204)) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		if ((start >> 28) != ((end - 1) >> 28)) {
			DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
				  start, end);
			return -EINVAL;
		}

		if ((cmd == 0 || cmd == 0x3) &&
		    (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) {
			DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
				  start, end);
			return -EINVAL;
		}
	}

	if (cmd == 0) {
		ctx->has_msg_cmd = true;
		r = amdgpu_uvd_cs_msg(ctx, bo, addr);
		if (r)
			return r;
	} else if (!ctx->has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_uvd_cs_reg - parse register writes
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the register writes, call cb on each complete command.
 */
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
			     int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx];
	int i, r;

	ctx->idx++;
	for (i = 0; i <= ctx->count; ++i) {
		unsigned reg = ctx->reg + i;

		if (ctx->idx >= ib->length_dw) {
			DRM_ERROR("Register command after end of CS!\n");
			return -EINVAL;
		}

		switch (reg) {
		case mmUVD_GPCOM_VCPU_DATA0:
			ctx->data0 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_DATA1:
			ctx->data1 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_CMD:
			r = cb(ctx);
			if (r)
				return r;
			break;
		case mmUVD_ENGINE_CNTL:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n", reg);
			return -EINVAL;
		}
		ctx->idx++;
	}
	return 0;
}

/**
 * amdgpu_uvd_cs_packets - parse UVD packets
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the command stream packets.
 */
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
				 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx];
	int r;

	for (ctx->idx = 0; ctx->idx < ib->length_dw; ) {
		uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
		unsigned type = CP_PACKET_GET_TYPE(cmd);
		switch (type) {
		case PACKET_TYPE0:
			ctx->reg = CP_PACKET0_GET_REG(cmd);
			ctx->count = CP_PACKET_GET_COUNT(cmd);
			r = amdgpu_uvd_cs_reg(ctx, cb);
			if (r)
				return r;
			break;
		case PACKET_TYPE2:
			++ctx->idx;
			break;
		default:
			DRM_ERROR("Unknown packet type %d!\n", type);
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * amdgpu_uvd_ring_parse_cs - UVD command submission parser
 *
 * @parser: Command submission parser context
 * @ib_idx: index of the indirect buffer to parse
 *
 * Parse the command stream, patch in addresses as necessary.
 */
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
{
	struct amdgpu_uvd_cs_ctx ctx = {};
	unsigned buf_sizes[] = {
		[0x00000000]	=	2048,
		[0x00000001]	=	0xFFFFFFFF,
		[0x00000002]	=	0xFFFFFFFF,
		[0x00000003]	=	2048,
		[0x00000004]	=	0xFFFFFFFF,
	};
	struct amdgpu_ib *ib = &parser->ibs[ib_idx];
	int r;

	if (ib->length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  ib->length_dw);
		return -EINVAL;
	}

	ctx.parser = parser;
	ctx.buf_sizes = buf_sizes;
	ctx.ib_idx = ib_idx;

	/* first round, make sure the buffers are actually in the UVD segment */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
	if (r)
		return r;

	/* second round, patch buffer addresses into the command stream */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
	if (r)
		return r;

	if (!ctx.has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	amdgpu_uvd_note_usage(ctx.parser->adev);

	return 0;
}

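/**
 * amdgpu_uvd_free_job - free the IB of a finished UVD job
 *
 * @job: job to clean up after
 *
 * Scheduler callback, called once the job's fence has signaled.
 */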
static int amdgpu_uvd_free_job(struct amdgpu_job *job)
{
	amdgpu_ib_free(job->adev, job->ibs);
	kfree(job->ibs);
	return 0;
}

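/**
 * amdgpu_uvd_send_msg - submit a UVD message buffer to the ring
 *
 * @ring: UVD ring to submit on
 * @bo: buffer object containing the message
 * @fence: optional location to return the submission fence
 *
 * Validate the message BO, build a small IB pointing the VCPU at it
 * and submit the IB through the kernel scheduler; drops the BO
 * reference once the submission fence is attached.
 */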
static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct fence **fence)
{
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head head;
	struct amdgpu_ib *ib = NULL;
	struct fence *f = NULL;
	struct amdgpu_device *adev = ring->adev;
	uint64_t addr;
	int i, r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
	if (r)
		return r;

	if (!bo->adev->uvd.address_64_bit) {
		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_uvd_force_into_uvd_segment(bo);
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto err;

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib) {
		r = -ENOMEM;
		goto err;
	}

	r = amdgpu_ib_get(ring, NULL, 64, ib);
	if (r)
		goto err1;

	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; ++i)
		ib->ptr[i] = PACKET2(0);
	ib->length_dw = 16;

	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_uvd_free_job,
						 AMDGPU_FENCE_OWNER_UNDEFINED,
						 &f);
	if (r)
		goto err2;

	ttm_eu_fence_buffer_objects(&ticket, &head, f);

	if (fence)
		*fence = fence_get(f);
	amdgpu_bo_unref(&bo);
	fence_put(f);

	return 0;

err2:
	amdgpu_ib_free(ring->adev, ib);
err1:
	kfree(ib);
err:
	ttm_eu_backoff_reservation(&ticket, &head);
	return r;
}

/* multiple fence commands without any stream commands in between can
   crash the vcpu so just try to emit a dummy create/destroy msg to
   avoid this */
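/**
 * amdgpu_uvd_get_create_msg - construct and send a dummy create msg
 *
 * @ring: UVD ring to submit on
 * @handle: session handle to use
 * @fence: optional location to return the submission fence
 *
 * Stitch together a minimal 1920x1088 create message in a temporary
 * BO and submit it via amdgpu_uvd_send_msg().
 */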
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	/* stitch together a UVD create msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_uvd_send_msg(ring, bo, fence);
}

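/**
 * amdgpu_uvd_get_destroy_msg - construct and send a dummy destroy msg
 *
 * @ring: UVD ring to submit on
 * @handle: session handle to destroy
 * @fence: optional location to return the submission fence
 *
 * Stitch together a destroy message for @handle in a temporary BO and
 * submit it via amdgpu_uvd_send_msg().
 */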
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       struct fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	/* stitch together a UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_uvd_send_msg(ring, bo, fence);
}

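/**
 * amdgpu_uvd_idle_work_handler - power down UVD when idle
 *
 * @work: the delayed work item
 *
 * If no fences are pending and no session handles are open, power
 * down UVD through DPM or by setting the UVD clocks to zero;
 * otherwise re-arm the work for another timeout period.
 */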
static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, uvd.idle_work.work);
	unsigned i, fences, handles = 0;

	fences = amdgpu_fence_count_emitted(&adev->uvd.ring);

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
		if (atomic_read(&adev->uvd.handles[i]))
			++handles;

	if (fences == 0 && handles == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, false);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		}
	} else {
		schedule_delayed_work(&adev->uvd.idle_work,
				      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
	}
}

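/**
 * amdgpu_uvd_note_usage - mark UVD as busy
 *
 * @adev: amdgpu_device pointer
 *
 * (Re)schedule the idle work and, if UVD was idle until now, bring
 * the clocks back up through DPM or amdgpu_asic_set_uvd_clocks().
 */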
static void amdgpu_uvd_note_usage(struct amdgpu_device *adev)
{
	bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
	set_clocks &= schedule_delayed_work(&adev->uvd.idle_work,
					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));

	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, true);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
		}
	}
}