1 /*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26 /*
27 * Authors:
28 * Christian König <deathsimple@vodafone.de>
29 */
30
31 #include <linux/firmware.h>
32 #include <linux/module.h>
33
34 #include <drm/drm.h>
35 #include <drm/drm_drv.h>
36
37 #include "amdgpu.h"
38 #include "amdgpu_pm.h"
39 #include "amdgpu_uvd.h"
40 #include "amdgpu_cs.h"
41 #include "cikd.h"
42 #include "uvd/uvd_4_2_d.h"
43
44 #include "amdgpu_ras.h"
45
46 /* 1 second timeout */
47 #define UVD_IDLE_TIMEOUT msecs_to_jiffies(1000)
48
49 /* Firmware versions for VI */
50 #define FW_1_65_10 ((1 << 24) | (65 << 16) | (10 << 8))
51 #define FW_1_87_11 ((1 << 24) | (87 << 16) | (11 << 8))
52 #define FW_1_87_12 ((1 << 24) | (87 << 16) | (12 << 8))
53 #define FW_1_37_15 ((1 << 24) | (37 << 16) | (15 << 8))
54
55 /* Polaris10/11 firmware version */
56 #define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8))
57
58 /* Firmware Names */
59 #ifdef CONFIG_DRM_AMDGPU_SI
60 #define FIRMWARE_TAHITI "amdgpu/tahiti_uvd.bin"
61 #define FIRMWARE_VERDE "amdgpu/verde_uvd.bin"
62 #define FIRMWARE_PITCAIRN "amdgpu/pitcairn_uvd.bin"
63 #define FIRMWARE_OLAND "amdgpu/oland_uvd.bin"
64 #endif
65 #ifdef CONFIG_DRM_AMDGPU_CIK
66 #define FIRMWARE_BONAIRE "amdgpu/bonaire_uvd.bin"
67 #define FIRMWARE_KABINI "amdgpu/kabini_uvd.bin"
68 #define FIRMWARE_KAVERI "amdgpu/kaveri_uvd.bin"
69 #define FIRMWARE_HAWAII "amdgpu/hawaii_uvd.bin"
70 #define FIRMWARE_MULLINS "amdgpu/mullins_uvd.bin"
71 #endif
72 #define FIRMWARE_TONGA "amdgpu/tonga_uvd.bin"
73 #define FIRMWARE_CARRIZO "amdgpu/carrizo_uvd.bin"
74 #define FIRMWARE_FIJI "amdgpu/fiji_uvd.bin"
75 #define FIRMWARE_STONEY "amdgpu/stoney_uvd.bin"
76 #define FIRMWARE_POLARIS10 "amdgpu/polaris10_uvd.bin"
77 #define FIRMWARE_POLARIS11 "amdgpu/polaris11_uvd.bin"
78 #define FIRMWARE_POLARIS12 "amdgpu/polaris12_uvd.bin"
79 #define FIRMWARE_VEGAM "amdgpu/vegam_uvd.bin"
80
81 #define FIRMWARE_VEGA10 "amdgpu/vega10_uvd.bin"
82 #define FIRMWARE_VEGA12 "amdgpu/vega12_uvd.bin"
83 #define FIRMWARE_VEGA20 "amdgpu/vega20_uvd.bin"
84
85 /* These are common relative offsets for all ASICs, from uvd_7_0_offset.h */
86 #define UVD_GPCOM_VCPU_CMD 0x03c3
87 #define UVD_GPCOM_VCPU_DATA0 0x03c4
88 #define UVD_GPCOM_VCPU_DATA1 0x03c5
89 #define UVD_NO_OP 0x03ff
90 #define UVD_BASE_SI 0x3800
91
92 /*
93 * amdgpu_uvd_cs_ctx - Command submission parser context
94 *
95 * Used for emulating virtual memory support on UVD 4.2.
96 */
97 struct amdgpu_uvd_cs_ctx {
98 struct amdgpu_cs_parser *parser;
99 unsigned int reg, count;
100 unsigned int data0, data1;
101 unsigned int idx;
102 struct amdgpu_ib *ib;
103
104 	/* does the IB have a msg command */
105 bool has_msg_cmd;
106
107 /* minimum buffer sizes */
108 unsigned int *buf_sizes;
109 };
110
111 #ifdef CONFIG_DRM_AMDGPU_SI
112 MODULE_FIRMWARE(FIRMWARE_TAHITI);
113 MODULE_FIRMWARE(FIRMWARE_VERDE);
114 MODULE_FIRMWARE(FIRMWARE_PITCAIRN);
115 MODULE_FIRMWARE(FIRMWARE_OLAND);
116 #endif
117 #ifdef CONFIG_DRM_AMDGPU_CIK
118 MODULE_FIRMWARE(FIRMWARE_BONAIRE);
119 MODULE_FIRMWARE(FIRMWARE_KABINI);
120 MODULE_FIRMWARE(FIRMWARE_KAVERI);
121 MODULE_FIRMWARE(FIRMWARE_HAWAII);
122 MODULE_FIRMWARE(FIRMWARE_MULLINS);
123 #endif
124 MODULE_FIRMWARE(FIRMWARE_TONGA);
125 MODULE_FIRMWARE(FIRMWARE_CARRIZO);
126 MODULE_FIRMWARE(FIRMWARE_FIJI);
127 MODULE_FIRMWARE(FIRMWARE_STONEY);
128 MODULE_FIRMWARE(FIRMWARE_POLARIS10);
129 MODULE_FIRMWARE(FIRMWARE_POLARIS11);
130 MODULE_FIRMWARE(FIRMWARE_POLARIS12);
131 MODULE_FIRMWARE(FIRMWARE_VEGAM);
132
133 MODULE_FIRMWARE(FIRMWARE_VEGA10);
134 MODULE_FIRMWARE(FIRMWARE_VEGA12);
135 MODULE_FIRMWARE(FIRMWARE_VEGA20);
136
137 static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
138 static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo);
139
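/**
 * amdgpu_uvd_create_msg_bo_helper - allocate a BO for UVD messages
 *
 * @adev: amdgpu_device pointer
 * @size: size of the buffer in bytes
 * @bo_ptr: resulting buffer object
 *
 * Allocate a kernel buffer object in GTT and, on ASICs without 64-bit
 * UVD addressing, move and pin it into the first 256MB of VRAM.
 */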
140 static int amdgpu_uvd_create_msg_bo_helper(struct amdgpu_device *adev,
141 uint32_t size,
142 struct amdgpu_bo **bo_ptr)
143 {
144 struct ttm_operation_ctx ctx = { true, false };
145 struct amdgpu_bo *bo = NULL;
146 void *addr;
147 int r;
148
149 r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
150 AMDGPU_GEM_DOMAIN_GTT,
151 &bo, NULL, &addr);
152 if (r)
153 return r;
154
155 if (adev->uvd.address_64_bit)
156 goto succ;
157
158 amdgpu_bo_kunmap(bo);
159 amdgpu_bo_unpin(bo);
160 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
161 amdgpu_uvd_force_into_uvd_segment(bo);
162 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
163 if (r)
164 goto err;
165 r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
166 if (r)
167 goto err_pin;
168 r = amdgpu_bo_kmap(bo, &addr);
169 if (r)
170 goto err_kmap;
171 succ:
172 amdgpu_bo_unreserve(bo);
173 *bo_ptr = bo;
174 return 0;
175 err_kmap:
176 amdgpu_bo_unpin(bo);
177 err_pin:
178 err:
179 amdgpu_bo_unreserve(bo);
180 amdgpu_bo_unref(&bo);
181 return r;
182 }
183
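/**
 * amdgpu_uvd_sw_init - UVD software initialization
 *
 * @adev: amdgpu_device pointer
 *
 * Request the UVD firmware, check its version, allocate the VCPU
 * buffer objects and the shared message IB buffer.
 */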
184 int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
185 {
186 unsigned long bo_size;
187 const char *fw_name;
188 const struct common_firmware_header *hdr;
189 unsigned int family_id;
190 int i, j, r;
191
192 INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
193
194 switch (adev->asic_type) {
195 #ifdef CONFIG_DRM_AMDGPU_SI
196 case CHIP_TAHITI:
197 fw_name = FIRMWARE_TAHITI;
198 break;
199 case CHIP_VERDE:
200 fw_name = FIRMWARE_VERDE;
201 break;
202 case CHIP_PITCAIRN:
203 fw_name = FIRMWARE_PITCAIRN;
204 break;
205 case CHIP_OLAND:
206 fw_name = FIRMWARE_OLAND;
207 break;
208 #endif
209 #ifdef CONFIG_DRM_AMDGPU_CIK
210 case CHIP_BONAIRE:
211 fw_name = FIRMWARE_BONAIRE;
212 break;
213 case CHIP_KABINI:
214 fw_name = FIRMWARE_KABINI;
215 break;
216 case CHIP_KAVERI:
217 fw_name = FIRMWARE_KAVERI;
218 break;
219 case CHIP_HAWAII:
220 fw_name = FIRMWARE_HAWAII;
221 break;
222 case CHIP_MULLINS:
223 fw_name = FIRMWARE_MULLINS;
224 break;
225 #endif
226 case CHIP_TONGA:
227 fw_name = FIRMWARE_TONGA;
228 break;
229 case CHIP_FIJI:
230 fw_name = FIRMWARE_FIJI;
231 break;
232 case CHIP_CARRIZO:
233 fw_name = FIRMWARE_CARRIZO;
234 break;
235 case CHIP_STONEY:
236 fw_name = FIRMWARE_STONEY;
237 break;
238 case CHIP_POLARIS10:
239 fw_name = FIRMWARE_POLARIS10;
240 break;
241 case CHIP_POLARIS11:
242 fw_name = FIRMWARE_POLARIS11;
243 break;
244 case CHIP_POLARIS12:
245 fw_name = FIRMWARE_POLARIS12;
246 break;
247 case CHIP_VEGA10:
248 fw_name = FIRMWARE_VEGA10;
249 break;
250 case CHIP_VEGA12:
251 fw_name = FIRMWARE_VEGA12;
252 break;
253 case CHIP_VEGAM:
254 fw_name = FIRMWARE_VEGAM;
255 break;
256 case CHIP_VEGA20:
257 fw_name = FIRMWARE_VEGA20;
258 break;
259 default:
260 return -EINVAL;
261 }
262
263 r = amdgpu_ucode_request(adev, &adev->uvd.fw, fw_name);
264 if (r) {
265 dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
266 fw_name);
267 amdgpu_ucode_release(&adev->uvd.fw);
268 return r;
269 }
270
271 /* Set the default UVD handles that the firmware can handle */
272 adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;
273
274 hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
275 family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
276
277 if (adev->asic_type < CHIP_VEGA20) {
278 unsigned int version_major, version_minor;
279
280 version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
281 version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
282 DRM_INFO("Found UVD firmware Version: %u.%u Family ID: %u\n",
283 version_major, version_minor, family_id);
284
285 		/*
286 		 * Limit the number of UVD handles depending on microcode major
287 		 * and minor versions. The first firmware version to support 40
288 		 * UVD instances is 1.80, so all subsequent versions also have
289 		 * the same support.
290 		 */
291 if ((version_major > 0x01) ||
292 ((version_major == 0x01) && (version_minor >= 0x50)))
293 adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
294
295 adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
296 (family_id << 8));
297
298 if ((adev->asic_type == CHIP_POLARIS10 ||
299 adev->asic_type == CHIP_POLARIS11) &&
300 (adev->uvd.fw_version < FW_1_66_16))
301 DRM_ERROR("POLARIS10/11 UVD firmware version %u.%u is too old.\n",
302 version_major, version_minor);
303 } else {
304 unsigned int enc_major, enc_minor, dec_minor;
305
306 dec_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
307 enc_minor = (le32_to_cpu(hdr->ucode_version) >> 24) & 0x3f;
308 enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3;
309 DRM_INFO("Found UVD firmware ENC: %u.%u DEC: .%u Family ID: %u\n",
310 enc_major, enc_minor, dec_minor, family_id);
311
312 adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
313
314 adev->uvd.fw_version = le32_to_cpu(hdr->ucode_version);
315 }
316
317 bo_size = AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
318 + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
319 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
320 bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
321
322 for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
323 if (adev->uvd.harvest_config & (1 << j))
324 continue;
325 r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
326 AMDGPU_GEM_DOMAIN_VRAM |
327 AMDGPU_GEM_DOMAIN_GTT,
328 &adev->uvd.inst[j].vcpu_bo,
329 &adev->uvd.inst[j].gpu_addr,
330 &adev->uvd.inst[j].cpu_addr);
331 if (r) {
332 dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
333 return r;
334 }
335 }
336
337 for (i = 0; i < adev->uvd.max_handles; ++i) {
338 atomic_set(&adev->uvd.handles[i], 0);
339 adev->uvd.filp[i] = NULL;
340 }
341
342 	/* from UVD v5.0 onward, HW addressing capacity increased to 64 bits */
343 if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
344 adev->uvd.address_64_bit = true;
345
346 r = amdgpu_uvd_create_msg_bo_helper(adev, 128 << 10, &adev->uvd.ib_bo);
347 if (r)
348 return r;
349
350 switch (adev->asic_type) {
351 case CHIP_TONGA:
352 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
353 break;
354 case CHIP_CARRIZO:
355 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11;
356 break;
357 case CHIP_FIJI:
358 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12;
359 break;
360 case CHIP_STONEY:
361 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15;
362 break;
363 default:
364 adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10;
365 }
366
367 return 0;
368 }
369
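/**
 * amdgpu_uvd_sw_fini - UVD software teardown
 *
 * @adev: amdgpu_device pointer
 *
 * Destroy the UVD scheduler entity, tear down the rings and release
 * the VCPU and message buffer objects as well as the firmware.
 */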
370 int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
371 {
372 void *addr = amdgpu_bo_kptr(adev->uvd.ib_bo);
373 int i, j;
374
375 drm_sched_entity_destroy(&adev->uvd.entity);
376
377 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
378 if (adev->uvd.harvest_config & (1 << j))
379 continue;
380 kvfree(adev->uvd.inst[j].saved_bo);
381
382 amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
383 &adev->uvd.inst[j].gpu_addr,
384 (void **)&adev->uvd.inst[j].cpu_addr);
385
386 amdgpu_ring_fini(&adev->uvd.inst[j].ring);
387
388 for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
389 amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
390 }
391 amdgpu_bo_free_kernel(&adev->uvd.ib_bo, NULL, &addr);
392 amdgpu_ucode_release(&adev->uvd.fw);
393
394 return 0;
395 }
396
397 /**
398 * amdgpu_uvd_entity_init - init entity
399 *
400 * @adev: amdgpu_device pointer
401 *
402 */
403 int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
404 {
405 struct amdgpu_ring *ring;
406 struct drm_gpu_scheduler *sched;
407 int r;
408
409 ring = &adev->uvd.inst[0].ring;
410 sched = &ring->sched;
411 r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL,
412 &sched, 1, NULL);
413 if (r) {
414 DRM_ERROR("Failed setting up UVD kernel entity.\n");
415 return r;
416 }
417
418 return 0;
419 }
420
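/**
 * amdgpu_uvd_suspend - UVD suspend handling
 *
 * @adev: amdgpu_device pointer
 *
 * Cancel the idle work and save the contents of the VCPU buffer
 * objects so they can be restored on resume.
 */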
421 int amdgpu_uvd_suspend(struct amdgpu_device *adev)
422 {
423 unsigned int size;
424 void *ptr;
425 int i, j, idx;
426 bool in_ras_intr = amdgpu_ras_intr_triggered();
427
428 cancel_delayed_work_sync(&adev->uvd.idle_work);
429
430 /* only valid for physical mode */
431 if (adev->asic_type < CHIP_POLARIS10) {
432 for (i = 0; i < adev->uvd.max_handles; ++i)
433 if (atomic_read(&adev->uvd.handles[i]))
434 break;
435
436 if (i == adev->uvd.max_handles)
437 return 0;
438 }
439
440 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
441 if (adev->uvd.harvest_config & (1 << j))
442 continue;
443 if (adev->uvd.inst[j].vcpu_bo == NULL)
444 continue;
445
446 size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
447 ptr = adev->uvd.inst[j].cpu_addr;
448
449 adev->uvd.inst[j].saved_bo = kvmalloc(size, GFP_KERNEL);
450 if (!adev->uvd.inst[j].saved_bo)
451 return -ENOMEM;
452
453 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
454 /* re-write 0 since err_event_athub will corrupt VCPU buffer */
455 if (in_ras_intr)
456 memset(adev->uvd.inst[j].saved_bo, 0, size);
457 else
458 memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
459
460 drm_dev_exit(idx);
461 }
462 }
463
464 if (in_ras_intr)
465 		DRM_WARN("UVD VCPU state may be lost due to RAS ERREVENT_ATHUB_INTERRUPT\n");
466
467 return 0;
468 }
469
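/**
 * amdgpu_uvd_resume - UVD resume handling
 *
 * @adev: amdgpu_device pointer
 *
 * Restore the saved VCPU buffer contents, or reload the firmware image
 * if no saved state is available.
 */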
470 int amdgpu_uvd_resume(struct amdgpu_device *adev)
471 {
472 unsigned int size;
473 void *ptr;
474 int i, idx;
475
476 for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
477 if (adev->uvd.harvest_config & (1 << i))
478 continue;
479 if (adev->uvd.inst[i].vcpu_bo == NULL)
480 return -EINVAL;
481
482 size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo);
483 ptr = adev->uvd.inst[i].cpu_addr;
484
485 if (adev->uvd.inst[i].saved_bo != NULL) {
486 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
487 memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
488 drm_dev_exit(idx);
489 }
490 kvfree(adev->uvd.inst[i].saved_bo);
491 adev->uvd.inst[i].saved_bo = NULL;
492 } else {
493 const struct common_firmware_header *hdr;
494 unsigned int offset;
495
496 hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
497 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
498 offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
499 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
500 memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
501 le32_to_cpu(hdr->ucode_size_bytes));
502 drm_dev_exit(idx);
503 }
504 size -= le32_to_cpu(hdr->ucode_size_bytes);
505 ptr += le32_to_cpu(hdr->ucode_size_bytes);
506 }
507 memset_io(ptr, 0, size);
508 /* to restore uvd fence seq */
509 amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring);
510 }
511 }
512 return 0;
513 }
514
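/**
 * amdgpu_uvd_free_handles - free handles still owned by a file
 *
 * @adev: amdgpu_device pointer
 * @filp: DRM file the handles belong to
 *
 * Send destroy messages for all sessions that were opened by @filp
 * and release their handles.
 */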
515 void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
516 {
517 struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;
518 int i, r;
519
520 for (i = 0; i < adev->uvd.max_handles; ++i) {
521 uint32_t handle = atomic_read(&adev->uvd.handles[i]);
522
523 if (handle != 0 && adev->uvd.filp[i] == filp) {
524 struct dma_fence *fence;
525
526 r = amdgpu_uvd_get_destroy_msg(ring, handle, false,
527 &fence);
528 if (r) {
529 DRM_ERROR("Error destroying UVD %d!\n", r);
530 continue;
531 }
532
533 dma_fence_wait(fence, false);
534 dma_fence_put(fence);
535
536 adev->uvd.filp[i] = NULL;
537 atomic_set(&adev->uvd.handles[i], 0);
538 }
539 }
540 }
541
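/**
 * amdgpu_uvd_force_into_uvd_segment - restrict a BO to the UVD segment
 *
 * @abo: buffer object to limit
 *
 * Limit all placements to the first 256MB so that UVD hardware without
 * 64-bit addressing can reach the buffer.
 */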
542 static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
543 {
544 int i;
545
546 for (i = 0; i < abo->placement.num_placement; ++i) {
547 abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
548 abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
549 }
550 }
551
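/**
 * amdgpu_uvd_get_addr_from_ctx - read the buffer address from the IB
 *
 * @ctx: UVD parser context
 *
 * Combine the DATA0 (low) and DATA1 (high) register writes into the
 * 64-bit GPU address the current command refers to.
 */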
552 static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
553 {
554 uint32_t lo, hi;
555 uint64_t addr;
556
557 lo = amdgpu_ib_get_value(ctx->ib, ctx->data0);
558 hi = amdgpu_ib_get_value(ctx->ib, ctx->data1);
559 addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);
560
561 return addr;
562 }
563
564 /**
565 * amdgpu_uvd_cs_pass1 - first parsing round
566 *
567 * @ctx: UVD parser context
568 *
569 * Make sure UVD message and feedback buffers are in VRAM and
570  * nobody is violating a 256MB boundary.
571 */
572 static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
573 {
574 struct ttm_operation_ctx tctx = { false, false };
575 struct amdgpu_bo_va_mapping *mapping;
576 struct amdgpu_bo *bo;
577 uint32_t cmd;
578 uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
579 int r = 0;
580
581 r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
582 if (r) {
583 DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr);
584 return r;
585 }
586
587 if (!ctx->parser->adev->uvd.address_64_bit) {
588 /* check if it's a message or feedback command */
589 cmd = amdgpu_ib_get_value(ctx->ib, ctx->idx) >> 1;
590 if (cmd == 0x0 || cmd == 0x3) {
591 /* yes, force it into VRAM */
592 uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
593
594 amdgpu_bo_placement_from_domain(bo, domain);
595 }
596 amdgpu_uvd_force_into_uvd_segment(bo);
597
598 r = ttm_bo_validate(&bo->tbo, &bo->placement, &tctx);
599 }
600
601 return r;
602 }
603
604 /**
605 * amdgpu_uvd_cs_msg_decode - handle UVD decode message
606 *
607 * @adev: amdgpu_device pointer
608 * @msg: pointer to message structure
609 * @buf_sizes: placeholder to put the different buffer lengths
610 *
611 * Peek into the decode message and calculate the necessary buffer sizes.
612 */
613 static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
614 unsigned int buf_sizes[])
615 {
616 unsigned int stream_type = msg[4];
617 unsigned int width = msg[6];
618 unsigned int height = msg[7];
619 unsigned int dpb_size = msg[9];
620 unsigned int pitch = msg[28];
621 unsigned int level = msg[57];
622
623 unsigned int width_in_mb = width / 16;
624 unsigned int height_in_mb = ALIGN(height / 16, 2);
625 unsigned int fs_in_mb = width_in_mb * height_in_mb;
626
627 unsigned int image_size, tmp, min_dpb_size, num_dpb_buffer;
628 unsigned int min_ctx_size = ~0;
629
630 image_size = width * height;
631 image_size += image_size / 2;
632 image_size = ALIGN(image_size, 1024);
633
634 switch (stream_type) {
635 case 0: /* H264 */
636 switch (level) {
637 case 30:
638 num_dpb_buffer = 8100 / fs_in_mb;
639 break;
640 case 31:
641 num_dpb_buffer = 18000 / fs_in_mb;
642 break;
643 case 32:
644 num_dpb_buffer = 20480 / fs_in_mb;
645 break;
646 case 41:
647 num_dpb_buffer = 32768 / fs_in_mb;
648 break;
649 case 42:
650 num_dpb_buffer = 34816 / fs_in_mb;
651 break;
652 case 50:
653 num_dpb_buffer = 110400 / fs_in_mb;
654 break;
655 case 51:
656 num_dpb_buffer = 184320 / fs_in_mb;
657 break;
658 default:
659 num_dpb_buffer = 184320 / fs_in_mb;
660 break;
661 }
662 num_dpb_buffer++;
663 if (num_dpb_buffer > 17)
664 num_dpb_buffer = 17;
665
666 /* reference picture buffer */
667 min_dpb_size = image_size * num_dpb_buffer;
668
669 /* macroblock context buffer */
670 min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;
671
672 /* IT surface buffer */
673 min_dpb_size += width_in_mb * height_in_mb * 32;
674 break;
675
676 case 1: /* VC1 */
677
678 /* reference picture buffer */
679 min_dpb_size = image_size * 3;
680
681 /* CONTEXT_BUFFER */
682 min_dpb_size += width_in_mb * height_in_mb * 128;
683
684 /* IT surface buffer */
685 min_dpb_size += width_in_mb * 64;
686
687 /* DB surface buffer */
688 min_dpb_size += width_in_mb * 128;
689
690 /* BP */
691 tmp = max(width_in_mb, height_in_mb);
692 min_dpb_size += ALIGN(tmp * 7 * 16, 64);
693 break;
694
695 case 3: /* MPEG2 */
696
697 /* reference picture buffer */
698 min_dpb_size = image_size * 3;
699 break;
700
701 case 4: /* MPEG4 */
702
703 /* reference picture buffer */
704 min_dpb_size = image_size * 3;
705
706 /* CM */
707 min_dpb_size += width_in_mb * height_in_mb * 64;
708
709 /* IT surface buffer */
710 min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
711 break;
712
713 case 7: /* H264 Perf */
714 switch (level) {
715 case 30:
716 num_dpb_buffer = 8100 / fs_in_mb;
717 break;
718 case 31:
719 num_dpb_buffer = 18000 / fs_in_mb;
720 break;
721 case 32:
722 num_dpb_buffer = 20480 / fs_in_mb;
723 break;
724 case 41:
725 num_dpb_buffer = 32768 / fs_in_mb;
726 break;
727 case 42:
728 num_dpb_buffer = 34816 / fs_in_mb;
729 break;
730 case 50:
731 num_dpb_buffer = 110400 / fs_in_mb;
732 break;
733 case 51:
734 num_dpb_buffer = 184320 / fs_in_mb;
735 break;
736 default:
737 num_dpb_buffer = 184320 / fs_in_mb;
738 break;
739 }
740 num_dpb_buffer++;
741 if (num_dpb_buffer > 17)
742 num_dpb_buffer = 17;
743
744 /* reference picture buffer */
745 min_dpb_size = image_size * num_dpb_buffer;
746
747 if (!adev->uvd.use_ctx_buf) {
748 /* macroblock context buffer */
749 min_dpb_size +=
750 width_in_mb * height_in_mb * num_dpb_buffer * 192;
751
752 /* IT surface buffer */
753 min_dpb_size += width_in_mb * height_in_mb * 32;
754 } else {
755 /* macroblock context buffer */
756 min_ctx_size =
757 width_in_mb * height_in_mb * num_dpb_buffer * 192;
758 }
759 break;
760
761 case 8: /* MJPEG */
762 min_dpb_size = 0;
763 break;
764
765 case 16: /* H265 */
766 image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
767 image_size = ALIGN(image_size, 256);
768
769 num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
770 min_dpb_size = image_size * num_dpb_buffer;
771 min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
772 * 16 * num_dpb_buffer + 52 * 1024;
773 break;
774
775 default:
776 DRM_ERROR("UVD codec not handled %d!\n", stream_type);
777 return -EINVAL;
778 }
779
780 if (width > pitch) {
781 DRM_ERROR("Invalid UVD decoding target pitch!\n");
782 return -EINVAL;
783 }
784
785 if (dpb_size < min_dpb_size) {
786 DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
787 dpb_size, min_dpb_size);
788 return -EINVAL;
789 }
790
791 buf_sizes[0x1] = dpb_size;
792 buf_sizes[0x2] = image_size;
793 buf_sizes[0x4] = min_ctx_size;
794 /* store image width to adjust nb memory pstate */
795 adev->uvd.decode_image_width = width;
796 return 0;
797 }
798
799 /**
800 * amdgpu_uvd_cs_msg - handle UVD message
801 *
802 * @ctx: UVD parser context
803 * @bo: buffer object containing the message
804 * @offset: offset into the buffer object
805 *
806 * Peek into the UVD message and extract the session id.
807  * Make sure that we don't open up too many sessions.
808 */
809 static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
810 struct amdgpu_bo *bo, unsigned int offset)
811 {
812 struct amdgpu_device *adev = ctx->parser->adev;
813 int32_t *msg, msg_type, handle;
814 void *ptr;
815 long r;
816 int i;
817
818 if (offset & 0x3F) {
819 DRM_ERROR("UVD messages must be 64 byte aligned!\n");
820 return -EINVAL;
821 }
822
823 r = amdgpu_bo_kmap(bo, &ptr);
824 if (r) {
825 DRM_ERROR("Failed mapping the UVD) message (%ld)!\n", r);
826 return r;
827 }
828
829 msg = ptr + offset;
830
831 msg_type = msg[1];
832 handle = msg[2];
833
834 if (handle == 0) {
835 amdgpu_bo_kunmap(bo);
836 DRM_ERROR("Invalid UVD handle!\n");
837 return -EINVAL;
838 }
839
840 switch (msg_type) {
841 case 0:
842 /* it's a create msg, calc image size (width * height) */
843 amdgpu_bo_kunmap(bo);
844
845 /* try to alloc a new handle */
846 for (i = 0; i < adev->uvd.max_handles; ++i) {
847 if (atomic_read(&adev->uvd.handles[i]) == handle) {
848 DRM_ERROR(")Handle 0x%x already in use!\n",
849 handle);
850 return -EINVAL;
851 }
852
853 if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
854 adev->uvd.filp[i] = ctx->parser->filp;
855 return 0;
856 }
857 }
858
859 DRM_ERROR("No more free UVD handles!\n");
860 return -ENOSPC;
861
862 case 1:
863 /* it's a decode msg, calc buffer sizes */
864 r = amdgpu_uvd_cs_msg_decode(adev, msg, ctx->buf_sizes);
865 amdgpu_bo_kunmap(bo);
866 if (r)
867 return r;
868
869 /* validate the handle */
870 for (i = 0; i < adev->uvd.max_handles; ++i) {
871 if (atomic_read(&adev->uvd.handles[i]) == handle) {
872 if (adev->uvd.filp[i] != ctx->parser->filp) {
873 DRM_ERROR("UVD handle collision detected!\n");
874 return -EINVAL;
875 }
876 return 0;
877 }
878 }
879
880 DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
881 return -ENOENT;
882
883 case 2:
884 /* it's a destroy msg, free the handle */
885 for (i = 0; i < adev->uvd.max_handles; ++i)
886 atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
887 amdgpu_bo_kunmap(bo);
888 return 0;
889
890 default:
891 DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
892 }
893
894 amdgpu_bo_kunmap(bo);
895 return -EINVAL;
896 }
897
898 /**
899 * amdgpu_uvd_cs_pass2 - second parsing round
900 *
901 * @ctx: UVD parser context
902 *
903 * Patch buffer addresses, make sure buffer sizes are correct.
904 */
905 static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
906 {
907 struct amdgpu_bo_va_mapping *mapping;
908 struct amdgpu_bo *bo;
909 uint32_t cmd;
910 uint64_t start, end;
911 uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
912 int r;
913
914 r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
915 if (r) {
916 DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr);
917 return r;
918 }
919
920 start = amdgpu_bo_gpu_offset(bo);
921
922 end = (mapping->last + 1 - mapping->start);
923 end = end * AMDGPU_GPU_PAGE_SIZE + start;
924
925 addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
926 start += addr;
927
928 amdgpu_ib_set_value(ctx->ib, ctx->data0, lower_32_bits(start));
929 amdgpu_ib_set_value(ctx->ib, ctx->data1, upper_32_bits(start));
930
931 cmd = amdgpu_ib_get_value(ctx->ib, ctx->idx) >> 1;
932 if (cmd < 0x4) {
933 if ((end - start) < ctx->buf_sizes[cmd]) {
934 DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
935 (unsigned int)(end - start),
936 ctx->buf_sizes[cmd]);
937 return -EINVAL;
938 }
939
940 } else if (cmd == 0x206) {
941 if ((end - start) < ctx->buf_sizes[4]) {
942 DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
943 (unsigned int)(end - start),
944 ctx->buf_sizes[4]);
945 return -EINVAL;
946 }
947 } else if ((cmd != 0x100) && (cmd != 0x204)) {
948 DRM_ERROR("invalid UVD command %X!\n", cmd);
949 return -EINVAL;
950 }
951
952 if (!ctx->parser->adev->uvd.address_64_bit) {
953 if ((start >> 28) != ((end - 1) >> 28)) {
954 DRM_ERROR("reloc %llx-%llx crossing 256MB boundary!\n",
955 start, end);
956 return -EINVAL;
957 }
958
959 if ((cmd == 0 || cmd == 0x3) &&
960 (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) {
961 DRM_ERROR("msg/fb buffer %llx-%llx out of 256MB segment!\n",
962 start, end);
963 return -EINVAL;
964 }
965 }
966
967 if (cmd == 0) {
968 ctx->has_msg_cmd = true;
969 r = amdgpu_uvd_cs_msg(ctx, bo, addr);
970 if (r)
971 return r;
972 } else if (!ctx->has_msg_cmd) {
973 DRM_ERROR("Message needed before other commands are send!\n");
974 return -EINVAL;
975 }
976
977 return 0;
978 }
979
980 /**
981 * amdgpu_uvd_cs_reg - parse register writes
982 *
983 * @ctx: UVD parser context
984 * @cb: callback function
985 *
986 * Parse the register writes, call cb on each complete command.
987 */
988 static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
989 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
990 {
991 int i, r;
992
993 ctx->idx++;
994 for (i = 0; i <= ctx->count; ++i) {
995 unsigned int reg = ctx->reg + i;
996
997 if (ctx->idx >= ctx->ib->length_dw) {
998 DRM_ERROR("Register command after end of CS!\n");
999 return -EINVAL;
1000 }
1001
1002 switch (reg) {
1003 case mmUVD_GPCOM_VCPU_DATA0:
1004 ctx->data0 = ctx->idx;
1005 break;
1006 case mmUVD_GPCOM_VCPU_DATA1:
1007 ctx->data1 = ctx->idx;
1008 break;
1009 case mmUVD_GPCOM_VCPU_CMD:
1010 r = cb(ctx);
1011 if (r)
1012 return r;
1013 break;
1014 case mmUVD_ENGINE_CNTL:
1015 case mmUVD_NO_OP:
1016 break;
1017 default:
1018 DRM_ERROR("Invalid reg 0x%X!\n", reg);
1019 return -EINVAL;
1020 }
1021 ctx->idx++;
1022 }
1023 return 0;
1024 }
1025
1026 /**
1027 * amdgpu_uvd_cs_packets - parse UVD packets
1028 *
1029 * @ctx: UVD parser context
1030 * @cb: callback function
1031 *
1032 * Parse the command stream packets.
1033 */
1034 static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
1035 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
1036 {
1037 int r;
1038
1039 for (ctx->idx = 0 ; ctx->idx < ctx->ib->length_dw; ) {
1040 uint32_t cmd = amdgpu_ib_get_value(ctx->ib, ctx->idx);
1041 unsigned int type = CP_PACKET_GET_TYPE(cmd);
1042
1043 switch (type) {
1044 case PACKET_TYPE0:
1045 ctx->reg = CP_PACKET0_GET_REG(cmd);
1046 ctx->count = CP_PACKET_GET_COUNT(cmd);
1047 r = amdgpu_uvd_cs_reg(ctx, cb);
1048 if (r)
1049 return r;
1050 break;
1051 case PACKET_TYPE2:
1052 ++ctx->idx;
1053 break;
1054 default:
1055 DRM_ERROR("Unknown packet type %d !\n", type);
1056 return -EINVAL;
1057 }
1058 }
1059 return 0;
1060 }
1061
1062 /**
1063 * amdgpu_uvd_ring_parse_cs - UVD command submission parser
1064 *
1065 * @parser: Command submission parser context
1066 * @job: the job to parse
1067 * @ib: the IB to patch
1068 *
1069 * Parse the command stream, patch in addresses as necessary.
1070 */
1071 int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser,
1072 struct amdgpu_job *job,
1073 struct amdgpu_ib *ib)
1074 {
1075 struct amdgpu_uvd_cs_ctx ctx = {};
1076 unsigned int buf_sizes[] = {
1077 [0x00000000] = 2048,
1078 [0x00000001] = 0xFFFFFFFF,
1079 [0x00000002] = 0xFFFFFFFF,
1080 [0x00000003] = 2048,
1081 [0x00000004] = 0xFFFFFFFF,
1082 };
1083 int r;
1084
1085 job->vm = NULL;
1086 ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
1087
1088 if (ib->length_dw % 16) {
1089 DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
1090 ib->length_dw);
1091 return -EINVAL;
1092 }
1093
1094 ctx.parser = parser;
1095 ctx.buf_sizes = buf_sizes;
1096 ctx.ib = ib;
1097
1098 /* first round only required on chips without UVD 64 bit address support */
1099 if (!parser->adev->uvd.address_64_bit) {
1100 /* first round, make sure the buffers are actually in the UVD segment */
1101 r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
1102 if (r)
1103 return r;
1104 }
1105
1106 /* second round, patch buffer addresses into the command stream */
1107 r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
1108 if (r)
1109 return r;
1110
1111 if (!ctx.has_msg_cmd) {
1112 DRM_ERROR("UVD-IBs need a msg command!\n");
1113 return -EINVAL;
1114 }
1115
1116 return 0;
1117 }
1118
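/**
 * amdgpu_uvd_send_msg - submit a UVD message buffer
 *
 * @ring: UVD ring to use
 * @bo: buffer object containing the message
 * @direct: submit directly to the ring instead of the scheduler
 * @fence: optional resulting fence
 *
 * Build a small IB pointing at @bo and submit it, either directly or
 * through the UVD kernel scheduler entity, then fence the buffer.
 */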
1119 static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
1120 bool direct, struct dma_fence **fence)
1121 {
1122 struct amdgpu_device *adev = ring->adev;
1123 struct dma_fence *f = NULL;
1124 uint32_t offset, data[4];
1125 struct amdgpu_job *job;
1126 struct amdgpu_ib *ib;
1127 uint64_t addr;
1128 int i, r;
1129
1130 r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity,
1131 AMDGPU_FENCE_OWNER_UNDEFINED,
1132 64, direct ? AMDGPU_IB_POOL_DIRECT :
1133 AMDGPU_IB_POOL_DELAYED, &job);
1134 if (r)
1135 return r;
1136
1137 if (adev->asic_type >= CHIP_VEGA10)
1138 offset = adev->reg_offset[UVD_HWIP][ring->me][1];
1139 else
1140 offset = UVD_BASE_SI;
1141
1142 data[0] = PACKET0(offset + UVD_GPCOM_VCPU_DATA0, 0);
1143 data[1] = PACKET0(offset + UVD_GPCOM_VCPU_DATA1, 0);
1144 data[2] = PACKET0(offset + UVD_GPCOM_VCPU_CMD, 0);
1145 data[3] = PACKET0(offset + UVD_NO_OP, 0);
1146
1147 ib = &job->ibs[0];
1148 addr = amdgpu_bo_gpu_offset(bo);
1149 ib->ptr[0] = data[0];
1150 ib->ptr[1] = addr;
1151 ib->ptr[2] = data[1];
1152 ib->ptr[3] = addr >> 32;
1153 ib->ptr[4] = data[2];
1154 ib->ptr[5] = 0;
1155 for (i = 6; i < 16; i += 2) {
1156 ib->ptr[i] = data[3];
1157 ib->ptr[i+1] = 0;
1158 }
1159 ib->length_dw = 16;
1160
1161 if (direct) {
1162 r = amdgpu_job_submit_direct(job, ring, &f);
1163 if (r)
1164 goto err_free;
1165 } else {
1166 r = drm_sched_job_add_resv_dependencies(&job->base,
1167 bo->tbo.base.resv,
1168 DMA_RESV_USAGE_KERNEL);
1169 if (r)
1170 goto err_free;
1171
1172 f = amdgpu_job_submit(job);
1173 }
1174
1175 amdgpu_bo_reserve(bo, true);
1176 amdgpu_bo_fence(bo, f, false);
1177 amdgpu_bo_unreserve(bo);
1178
1179 if (fence)
1180 *fence = dma_fence_get(f);
1181 dma_fence_put(f);
1182
1183 return 0;
1184
1185 err_free:
1186 amdgpu_job_free(job);
1187 return r;
1188 }
1189
1190 /* multiple fence commands without any stream commands in between can
1191  * crash the VCPU, so just try to emit a dummy create/destroy msg to
1192 * avoid this
1193 */
1194 int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
1195 struct dma_fence **fence)
1196 {
1197 struct amdgpu_device *adev = ring->adev;
1198 struct amdgpu_bo *bo = adev->uvd.ib_bo;
1199 uint32_t *msg;
1200 int i;
1201
1202 msg = amdgpu_bo_kptr(bo);
1203 	/* stitch together a UVD create msg */
1204 msg[0] = cpu_to_le32(0x00000de4);
1205 msg[1] = cpu_to_le32(0x00000000);
1206 msg[2] = cpu_to_le32(handle);
1207 msg[3] = cpu_to_le32(0x00000000);
1208 msg[4] = cpu_to_le32(0x00000000);
1209 msg[5] = cpu_to_le32(0x00000000);
1210 msg[6] = cpu_to_le32(0x00000000);
1211 msg[7] = cpu_to_le32(0x00000780);
1212 msg[8] = cpu_to_le32(0x00000440);
1213 msg[9] = cpu_to_le32(0x00000000);
1214 msg[10] = cpu_to_le32(0x01b37000);
1215 for (i = 11; i < 1024; ++i)
1216 msg[i] = cpu_to_le32(0x0);
1217
1218 return amdgpu_uvd_send_msg(ring, bo, true, fence);
1219
1220 }
1221
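/**
 * amdgpu_uvd_get_destroy_msg - send a UVD destroy message
 *
 * @ring: UVD ring to use
 * @handle: session handle to destroy
 * @direct: submit directly to the ring instead of the scheduler
 * @fence: optional resulting fence
 *
 * Stitch together a destroy message for @handle and send it, using the
 * shared IB buffer for direct submission or a temporary BO otherwise.
 */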
1222 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
1223 bool direct, struct dma_fence **fence)
1224 {
1225 struct amdgpu_device *adev = ring->adev;
1226 struct amdgpu_bo *bo = NULL;
1227 uint32_t *msg;
1228 int r, i;
1229
1230 if (direct) {
1231 bo = adev->uvd.ib_bo;
1232 } else {
1233 r = amdgpu_uvd_create_msg_bo_helper(adev, 4096, &bo);
1234 if (r)
1235 return r;
1236 }
1237
1238 msg = amdgpu_bo_kptr(bo);
1239 	/* stitch together a UVD destroy msg */
1240 msg[0] = cpu_to_le32(0x00000de4);
1241 msg[1] = cpu_to_le32(0x00000002);
1242 msg[2] = cpu_to_le32(handle);
1243 msg[3] = cpu_to_le32(0x00000000);
1244 for (i = 4; i < 1024; ++i)
1245 msg[i] = cpu_to_le32(0x0);
1246
1247 r = amdgpu_uvd_send_msg(ring, bo, direct, fence);
1248
1249 if (!direct)
1250 amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
1251
1252 return r;
1253 }
1254
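/**
 * amdgpu_uvd_idle_work_handler - power down UVD when idle
 *
 * @work: delayed work item
 *
 * Count the fences still emitted on all UVD and UVD-ENC rings; if none
 * are outstanding, gate UVD clocks and power, otherwise re-arm the
 * idle work.
 */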
1255 static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
1256 {
1257 struct amdgpu_device *adev =
1258 container_of(work, struct amdgpu_device, uvd.idle_work.work);
1259 unsigned int fences = 0, i, j;
1260
1261 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
1262 if (adev->uvd.harvest_config & (1 << i))
1263 continue;
1264 fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
1265 for (j = 0; j < adev->uvd.num_enc_rings; ++j)
1266 fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
1267 }
1268
1269 if (fences == 0) {
1270 if (adev->pm.dpm_enabled) {
1271 amdgpu_dpm_enable_uvd(adev, false);
1272 } else {
1273 amdgpu_asic_set_uvd_clocks(adev, 0, 0);
1274 /* shutdown the UVD block */
1275 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
1276 AMD_PG_STATE_GATE);
1277 amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
1278 AMD_CG_STATE_GATE);
1279 }
1280 } else {
1281 schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
1282 }
1283 }
1284
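/**
 * amdgpu_uvd_ring_begin_use - power up UVD before use
 *
 * @ring: ring the submission will go to
 *
 * Cancel the pending idle work and, if UVD was idle, ungate its clocks
 * and power. Does nothing under SR-IOV.
 */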
1285 void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
1286 {
1287 struct amdgpu_device *adev = ring->adev;
1288 bool set_clocks;
1289
1290 if (amdgpu_sriov_vf(adev))
1291 return;
1292
1293 set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
1294 if (set_clocks) {
1295 if (adev->pm.dpm_enabled) {
1296 amdgpu_dpm_enable_uvd(adev, true);
1297 } else {
1298 amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
1299 amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
1300 AMD_CG_STATE_UNGATE);
1301 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
1302 AMD_PG_STATE_UNGATE);
1303 }
1304 }
1305 }
1306
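/**
 * amdgpu_uvd_ring_end_use - schedule power down after use
 *
 * @ring: ring that was used
 *
 * Re-arm the delayed idle work so UVD is powered down again once it
 * has been idle for UVD_IDLE_TIMEOUT. Does nothing under SR-IOV.
 */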
1307 void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
1308 {
1309 if (!amdgpu_sriov_vf(ring->adev))
1310 schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
1311 }
1312
1313 /**
1314 * amdgpu_uvd_ring_test_ib - test ib execution
1315 *
1316 * @ring: amdgpu_ring pointer
1317 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
1318 *
1319 * Test if we can successfully execute an IB
1320 */
1321 int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1322 {
1323 struct dma_fence *fence;
1324 long r;
1325
1326 r = amdgpu_uvd_get_create_msg(ring, 1, &fence);
1327 if (r)
1328 goto error;
1329
1330 r = dma_fence_wait_timeout(fence, false, timeout);
1331 dma_fence_put(fence);
1332 if (r == 0)
1333 r = -ETIMEDOUT;
1334 if (r < 0)
1335 goto error;
1336
1337 r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
1338 if (r)
1339 goto error;
1340
1341 r = dma_fence_wait_timeout(fence, false, timeout);
1342 if (r == 0)
1343 r = -ETIMEDOUT;
1344 else if (r > 0)
1345 r = 0;
1346
1347 dma_fence_put(fence);
1348
1349 error:
1350 return r;
1351 }
1352
1353 /**
1354 * amdgpu_uvd_used_handles - returns used UVD handles
1355 *
1356 * @adev: amdgpu_device pointer
1357 *
1358 * Returns the number of UVD handles in use
1359 */
1360 uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
1361 {
1362 unsigned int i;
1363 uint32_t used_handles = 0;
1364
1365 for (i = 0; i < adev->uvd.max_handles; ++i) {
1366 /*
1367 * Handles can be freed in any order, and not
1368 * necessarily linear. So we need to count
1369 * all non-zero handles.
1370 */
1371 if (atomic_read(&adev->uvd.handles[i]))
1372 used_handles++;
1373 }
1374
1375 return used_handles;
1376 }
1377