/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_uvd.h"
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_uvd.bin"
#define FIRMWARE_KABINI		"radeon/kabini_uvd.bin"
#define FIRMWARE_KAVERI		"radeon/kaveri_uvd.bin"
#define FIRMWARE_HAWAII		"radeon/hawaii_uvd.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_uvd.bin"

/**
 * amdgpu_uvd_cs_ctx - Command submission parser context
 *
 * Used for emulating virtual memory support on UVD 4.2.
 */
struct amdgpu_uvd_cs_ctx {
	struct amdgpu_cs_parser *parser;
	unsigned reg, count;
	unsigned data0, data1;
	unsigned idx;
	unsigned ib_idx;

	/* does the IB have a msg command */
	bool has_msg_cmd;

	/* minimum buffer sizes */
	unsigned *buf_sizes;
};

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);

static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
static void amdgpu_uvd_idle_work_handler(struct work_struct *work);

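/**
 * amdgpu_uvd_sw_init - load firmware and allocate the UVD VCPU BO
 *
 * @adev: amdgpu_device pointer
 *
 * Pick the firmware image for the ASIC, request and validate it, then
 * allocate, pin and map the VCPU buffer object that backs it and clear
 * the session handle table.
 */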
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned version_major, version_minor, family_id;
	int i, r;

	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->uvd.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->uvd.fw);
		adev->uvd.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
	DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
		version_major, version_minor, family_id);
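
	/*
	 * ucode_version packs all three fields into one dword: the major
	 * version lives in bits 31:24, the minor version in bits 15:8 and
	 * the family ID in bits 7:0. For example (illustrative value only),
	 * 0x01000528 would decode as version 1.5, family ID 0x28.
	 */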

	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
		  + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &adev->uvd.vcpu_bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
	if (r) {
		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve UVD bo\n", r);
		return r;
	}

	r = amdgpu_bo_pin(adev->uvd.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->uvd.gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
		dev_err(adev->dev, "(%d) UVD bo pin failed\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->uvd.vcpu_bo, &adev->uvd.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) UVD map failed\n", r);
		amdgpu_bo_unpin(adev->uvd.vcpu_bo);
		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
		return r;
	}

	amdgpu_bo_unreserve(adev->uvd.vcpu_bo);

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		atomic_set(&adev->uvd.handles[i], 0);
		adev->uvd.filp[i] = NULL;
	}

	/* from UVD v5.0, HW addressing capacity increased to 64 bits */
	if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
		adev->uvd.address_64_bit = true;

	return 0;
}

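/**
 * amdgpu_uvd_sw_fini - free the UVD VCPU BO and firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Unmap, unpin and free the VCPU buffer object, tear down the UVD ring
 * and release the firmware image.
 */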
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->uvd.vcpu_bo == NULL)
		return 0;

	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
	if (!r) {
		amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
		amdgpu_bo_unpin(adev->uvd.vcpu_bo);
		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
	}

	amdgpu_bo_unref(&adev->uvd.vcpu_bo);

	amdgpu_ring_fini(&adev->uvd.ring);

	release_firmware(adev->uvd.fw);

	return 0;
}

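/**
 * amdgpu_uvd_suspend - shut down all open UVD sessions
 *
 * @adev: amdgpu_device pointer
 *
 * Send a destroy message for every handle that is still open so the
 * firmware state is clean before the block is powered down.
 */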
int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	int i, r;

	if (adev->uvd.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
		if (handle != 0) {
			struct fence *fence;

			amdgpu_uvd_note_usage(adev);

			r = amdgpu_uvd_get_destroy_msg(ring, handle, false, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			fence_wait(fence, false);
			fence_put(fence);

			adev->uvd.filp[i] = NULL;
			atomic_set(&adev->uvd.handles[i], 0);
		}
	}

	return 0;
}

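/**
 * amdgpu_uvd_resume - reload the UVD firmware image
 *
 * @adev: amdgpu_device pointer
 *
 * Copy the firmware back into the VCPU BO and clear the stack/heap area
 * behind it, since the VRAM contents may have been lost over suspend.
 */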
int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	const struct common_firmware_header *hdr;
	unsigned offset;

	if (adev->uvd.vcpu_bo == NULL)
		return -EINVAL;

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
		(adev->uvd.fw->size) - offset);

	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
	size -= le32_to_cpu(hdr->ucode_size_bytes);
	ptr = adev->uvd.cpu_addr;
	ptr += le32_to_cpu(hdr->ucode_size_bytes);

	memset(ptr, 0, size);

	return 0;
}

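/**
 * amdgpu_uvd_free_handles - free all sessions owned by a file
 *
 * @adev: amdgpu_device pointer
 * @filp: DRM file the sessions belong to
 *
 * Called on file close; sends a destroy message for every handle the
 * file still has open so crashed userspace can't leak sessions.
 */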
void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
		if (handle != 0 && adev->uvd.filp[i] == filp) {
			struct fence *fence;

			amdgpu_uvd_note_usage(adev);

			r = amdgpu_uvd_get_destroy_msg(ring, handle,
						       false, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			fence_wait(fence, false);
			fence_put(fence);

			adev->uvd.filp[i] = NULL;
			atomic_set(&adev->uvd.handles[i], 0);
		}
	}
}

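/**
 * amdgpu_uvd_force_into_uvd_segment - restrict BO placement
 *
 * @rbo: buffer object to restrict
 *
 * Limit every placement to the first 256MB, since UVD before v5.0 can
 * only address a single 256MB segment.
 */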
static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *rbo)
{
	int i;

	for (i = 0; i < rbo->placement.num_placement; ++i) {
		rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
		rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
	}
}

/**
 * amdgpu_uvd_cs_pass1 - first parsing round
 *
 * @ctx: UVD parser context
 *
 * Make sure UVD message and feedback buffers are in VRAM and
 * nobody is violating a 256MB boundary.
 */
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd, lo, hi;
	uint64_t addr;
	int r = 0;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		/* check if it's a message or feedback command */
		cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
		if (cmd == 0x0 || cmd == 0x3) {
			/* yes, force it into VRAM */
			uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
			amdgpu_ttm_placement_from_domain(bo, domain);
		}
		amdgpu_uvd_force_into_uvd_segment(bo);

		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	}

	return r;
}

/**
 * amdgpu_uvd_cs_msg_decode - handle UVD decode message
 *
 * @msg: pointer to message structure
 * @buf_sizes: returned buffer sizes
 *
 * Peek into the decode message and calculate the necessary buffer sizes.
 */
static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];
	unsigned level = msg[57];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);
	unsigned fs_in_mb = width_in_mb * height_in_mb;

	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
	unsigned min_ctx_size = 0;

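	/*
	 * The target image is NV12-like 4:2:0: one byte of luma per pixel
	 * plus half that again for chroma. E.g. 1920x1088 gives
	 * 1920 * 1088 * 3 / 2 = 3133440 bytes (already 1024-aligned).
	 */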
	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */
	case 7: /* H264 Perf */
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;
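
		/*
		 * Worked example (illustrative): a 1920x1088 level 4.1
		 * stream has fs_in_mb = 120 * 68 = 8160, so
		 * num_dpb_buffer = 32768 / 8160 + 1 = 5 reference frames.
		 */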

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	case 16: /* H265 */
		image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
		image_size = ALIGN(image_size, 256);

		num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
		min_dpb_size = image_size * num_dpb_buffer;
		min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
			       * 16 * num_dpb_buffer + 52 * 1024;
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	buf_sizes[0x4] = min_ctx_size;
	return 0;
}

/**
 * amdgpu_uvd_cs_msg - handle UVD message
 *
 * @ctx: UVD parser context
 * @bo: buffer object containing the message
 * @offset: offset into the buffer object
 *
 * Peek into the UVD message and extract the session id.
 * Make sure that we don't open up too many sessions.
 */
static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
			     struct amdgpu_bo *bo, unsigned offset)
{
	struct amdgpu_device *adev = ctx->parser->adev;
	int32_t *msg, msg_type, handle;
	void *ptr;
	long r;
	int i;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		amdgpu_bo_kunmap(bo);
		DRM_ERROR("Invalid UVD handle!\n");
		return -EINVAL;
	}

	switch (msg_type) {
	case 0:
		/* it's a create msg, reserve a session handle for it */
		amdgpu_bo_kunmap(bo);

		/* try to alloc a new handle */
		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
			if (atomic_read(&adev->uvd.handles[i]) == handle) {
				DRM_ERROR("Handle 0x%x already in use!\n", handle);
				return -EINVAL;
			}

			if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
				adev->uvd.filp[i] = ctx->parser->filp;
				return 0;
			}
		}

		DRM_ERROR("No more free UVD handles!\n");
		return -EINVAL;

	case 1:
		/* it's a decode msg, calc buffer sizes */
		r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes);
		amdgpu_bo_kunmap(bo);
		if (r)
			return r;

		/* validate the handle */
		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
			if (atomic_read(&adev->uvd.handles[i]) == handle) {
				if (adev->uvd.filp[i] != ctx->parser->filp) {
					DRM_ERROR("UVD handle collision detected!\n");
					return -EINVAL;
				}
				return 0;
			}
		}

		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
		return -ENOENT;

	case 2:
		/* it's a destroy msg, free the handle */
		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
		amdgpu_bo_kunmap(bo);
		return 0;

	default:
		amdgpu_bo_kunmap(bo);
		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
		return -EINVAL;
	}
}

/**
 * amdgpu_uvd_cs_pass2 - second parsing round
 *
 * @ctx: UVD parser context
 *
 * Patch buffer addresses, make sure buffer sizes are correct.
 */
static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd, lo, hi;
	uint64_t start, end;
	uint64_t addr;
	int r;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
	if (mapping == NULL)
		return -EINVAL;

	start = amdgpu_bo_gpu_offset(bo);

	end = (mapping->it.last + 1 - mapping->it.start);
	end = end * AMDGPU_GPU_PAGE_SIZE + start;

	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
	start += addr;

	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,
			    lower_32_bits(start));
	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data1,
			    upper_32_bits(start));

	cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
	if (cmd < 0x4) {
		if ((end - start) < ctx->buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[cmd]);
			return -EINVAL;
		}
	} else if (cmd == 0x206) {
		if ((end - start) < ctx->buf_sizes[4]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[4]);
			return -EINVAL;
		}
	} else if ((cmd != 0x100) && (cmd != 0x204)) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		if ((start >> 28) != ((end - 1) >> 28)) {
			DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
				  start, end);
			return -EINVAL;
		}

		if ((cmd == 0 || cmd == 0x3) &&
		    (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) {
			DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
				  start, end);
			return -EINVAL;
		}
	}

	if (cmd == 0) {
		ctx->has_msg_cmd = true;
		r = amdgpu_uvd_cs_msg(ctx, bo, addr);
		if (r)
			return r;
	} else if (!ctx->has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_uvd_cs_reg - parse register writes
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the register writes, call cb on each complete command.
 */
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
			     int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int i, r;

	ctx->idx++;
	for (i = 0; i <= ctx->count; ++i) {
		unsigned reg = ctx->reg + i;

		if (ctx->idx >= ib->length_dw) {
			DRM_ERROR("Register command after end of CS!\n");
			return -EINVAL;
		}

		switch (reg) {
		case mmUVD_GPCOM_VCPU_DATA0:
			ctx->data0 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_DATA1:
			ctx->data1 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_CMD:
			r = cb(ctx);
			if (r)
				return r;
			break;
		case mmUVD_ENGINE_CNTL:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n", reg);
			return -EINVAL;
		}
		ctx->idx++;
	}
	return 0;
}

/**
 * amdgpu_uvd_cs_packets - parse UVD packets
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the command stream packets.
 */
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
				 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int r;

	for (ctx->idx = 0; ctx->idx < ib->length_dw; ) {
		uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
		unsigned type = CP_PACKET_GET_TYPE(cmd);
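
		/*
		 * Only two packet types are expected here: type-0 packets
		 * are register write bursts (the header encodes the start
		 * register and a count, followed by count + 1 data dwords),
		 * type-2 packets are single-dword padding/NOPs.
		 */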
		switch (type) {
		case PACKET_TYPE0:
			ctx->reg = CP_PACKET0_GET_REG(cmd);
			ctx->count = CP_PACKET_GET_COUNT(cmd);
			r = amdgpu_uvd_cs_reg(ctx, cb);
			if (r)
				return r;
			break;
		case PACKET_TYPE2:
			++ctx->idx;
			break;
		default:
			DRM_ERROR("Unknown packet type %d!\n", type);
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * amdgpu_uvd_ring_parse_cs - UVD command submission parser
 *
 * @parser: Command submission parser context
 * @ib_idx: index of the IB to parse
 *
 * Parse the command stream, patch in addresses as necessary.
 */
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
{
	struct amdgpu_uvd_cs_ctx ctx = {};
	unsigned buf_sizes[] = {
		[0x00000000]	=	2048,
		[0x00000001]	=	0xFFFFFFFF,
		[0x00000002]	=	0xFFFFFFFF,
		[0x00000003]	=	2048,
		[0x00000004]	=	0xFFFFFFFF,
	};
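	/*
	 * The index is the command type checked in pass2: 0 = message
	 * (2048 bytes minimum), 1 = DPB, 2 = decoding target, 3 = feedback
	 * (2048 bytes minimum), 4 = H.265 context. The 0xFFFFFFFF entries
	 * are placeholders overwritten from the decode message.
	 */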
	struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
	int r;

	if (ib->length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  ib->length_dw);
		return -EINVAL;
	}

	ctx.parser = parser;
	ctx.buf_sizes = buf_sizes;
	ctx.ib_idx = ib_idx;

	/* first round, make sure the buffers are actually in the UVD segment */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
	if (r)
		return r;

	/* second round, patch buffer addresses into the command stream */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
	if (r)
		return r;

	if (!ctx.has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	amdgpu_uvd_note_usage(ctx.parser->adev);

	return 0;
}

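/**
 * amdgpu_uvd_send_msg - submit a UVD message to the ring
 *
 * @ring: UVD ring to use
 * @bo: buffer object containing the message
 * @direct: submit directly to the ring or go through the scheduler
 * @fence: optional returned fence
 *
 * Build a small IB that points the VCPU at the message BO and submit it.
 * On success the BO reference is dropped.
 */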
static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
			       bool direct, struct fence **fence)
{
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head head;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct fence *f = NULL;
	struct amdgpu_device *adev = ring->adev;
	uint64_t addr;
	int i, r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
	if (r)
		return r;

	if (!bo->adev->uvd.address_64_bit) {
		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_uvd_force_into_uvd_segment(bo);
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto err;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
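
	/*
	 * The IB is a fixed 16-dword stream: write the message address into
	 * UVD_GPCOM_VCPU_DATA0/DATA1, issue command 0 through
	 * UVD_GPCOM_VCPU_CMD to make the VCPU fetch it, then pad with
	 * type-2 NOP packets.
	 */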
	ib->ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; ++i)
		ib->ptr[i] = PACKET2(0);
	ib->length_dw = 16;

	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib,
				       AMDGPU_FENCE_OWNER_UNDEFINED, NULL, &f);
		if (r)
			goto err_free;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err_free;
	}

	ttm_eu_fence_buffer_objects(&ticket, &head, f);

	if (fence)
		*fence = fence_get(f);
	amdgpu_bo_unref(&bo);
	fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	ttm_eu_backoff_reservation(&ticket, &head);
	return r;
}

/*
 * Multiple fence commands without any stream commands in between can
 * crash the vcpu so just try to emit a dummy create/destroy msg to
 * avoid this.
 */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	/* stitch together a UVD create msg */
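	/*
	 * Message layout (as far as the format is understood): dword 0 is
	 * the message size in bytes, dword 1 the message type (0 = create),
	 * dword 2 the session handle; dwords 7 and 8 appear to carry a
	 * dummy 1920x1088 (0x780 x 0x440) resolution.
	 */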
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_uvd_send_msg(ring, bo, true, fence);
}

int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	/* stitch together a UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_uvd_send_msg(ring, bo, direct, fence);
}

static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, uvd.idle_work.work);
	unsigned i, fences, handles = 0;

	fences = amdgpu_fence_count_emitted(&adev->uvd.ring);

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
		if (atomic_read(&adev->uvd.handles[i]))
			++handles;

	if (fences == 0 && handles == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, false);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		}
	} else {
		schedule_delayed_work(&adev->uvd.idle_work,
				      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
	}
}

static void amdgpu_uvd_note_usage(struct amdgpu_device *adev)
{
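	/*
	 * set_clocks ends up true only if the idle work was not already
	 * pending, i.e. the block was considered idle; in that case raise
	 * the UVD clocks/power state. The idle timer is re-armed either way.
	 */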
	bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
	set_clocks &= schedule_delayed_work(&adev->uvd.idle_work,
					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));

	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, true);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
		}
	}
}
1038