/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/delay.h>
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
#include "ivsrcid/ivsrcid_vislands30.h"

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);
static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v5_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v5_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v5_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

static int uvd_v5_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
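	/* UVD 5.0 has a single decode instance with one ring */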
	adev->uvd.num_uvd_inst = 1;

	uvd_v5_0_set_ring_funcs(adev);
	uvd_v5_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v5_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY,
			      VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE,
			      &adev->uvd.inst->irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	r = amdgpu_uvd_entity_init(adev);

	return r;
}

static int uvd_v5_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v5_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v5_0_enable_mgcg(adev, true);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

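	/*
	 * Reserve 10 dwords: each of the five PACKET0 register writes
	 * below (three semaphore timeouts, the timeout status clear and
	 * the semaphore control) takes two dwords.
	 */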
	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v5_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block and mark the ring as no longer ready
 */
static int uvd_v5_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

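	/* uvd_v5_0_stop() clears UVD_STATUS, so non-zero means the engine is still running */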
	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v5_0_stop(adev);

	return 0;
}

static int uvd_v5_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * Proper cleanups before halting the HW engine:
	 * - cancel the delayed idle work
	 * - enable powergating
	 * - enable clockgating
	 * - disable dpm
	 *
	 * TODO: to align with the VCN implementation, move the
	 * jobs for clockgating/powergating/dpm setting to
	 * ->set_powergating_state().
	 */
	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_uvd(adev, false);
	} else {
		amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		/* shut down the UVD block */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_CG_STATE_GATE);
	}

	r = uvd_v5_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v5_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v5_0_hw_init(adev);
}

/**
 * uvd_v5_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
	       lower_32_bits(adev->uvd.inst->gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
	       upper_32_bits(adev->uvd.inst->gpu_addr));

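	/*
	 * Carve the UVD BO into three VCPU cache windows: the firmware
	 * image, the decoder heap, and the stack plus per-session state.
	 * Cache offsets are programmed in 8-byte units, hence the >> 3.
	 */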
	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

/**
 * uvd_v5_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v5_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v5_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
		(1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

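	/*
	 * Wait for the VCPU to come up: poll bit 1 of UVD_STATUS and,
	 * if it never sets, pulse the VCPU soft reset and try again,
	 * up to 10 attempts.
	 */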
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* clear bit 2 (value 0x4) of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

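	/*
	 * Program the ring buffer control: RB_BUFSZ is log2 of the ring
	 * size, read-pointer writes are enabled so the driver can reset
	 * the RPTR below, and fetching stays disabled (RB_NO_FETCH)
	 * until setup is complete.
	 */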
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
	       lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
	       upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

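	/* start the write pointer at the hardware read pointer so the ring comes up empty */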
	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	return 0;
}

/**
 * uvd_v5_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}

/**
 * uvd_v5_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

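	/*
	 * First packet group: latch the sequence number in UVD_CONTEXT_ID,
	 * point GPCOM_VCPU_DATA0/1 at the fence address and kick the
	 * command; the second group (command 2) raises the trap interrupt.
	 */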
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v5_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

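	/*
	 * Seed UVD_CONTEXT_ID with 0xCAFEDEAD, then have the engine
	 * overwrite it with 0xDEADBEEF through the ring; seeing the new
	 * value proves the ring is being fetched and executed.
	 */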
	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v5_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

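	/* NOPs go out as PACKET0(mmUVD_NO_OP) + payload pairs, so the ring position and count must stay 2-dword aligned */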
	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

static bool uvd_v5_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v5_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v5_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v5_0_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
		 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v5_0_start(adev);
}

static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.inst->ring);
	return 0;
}

static void uvd_v5_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3, suvd_flags;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__JPEG_MASK |
			UVD_CGC_GATE__SCPU_MASK);
		/* only when PG is enabled can we gate the clock to the VCPU */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;
		data3 &= ~UVD_CGC_GATE__REGS_MASK;
		data1 |= suvd_flags;
	} else {
		data3 = 0;
		data1 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		  UVD_CGC_CTRL__SYS_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MODE_MASK |
		  UVD_CGC_CTRL__MPEG2_MODE_MASK |
		  UVD_CGC_CTRL__REGS_MODE_MASK |
		  UVD_CGC_CTRL__RBC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		  UVD_CGC_CTRL__IDCT_MODE_MASK |
		  UVD_CGC_CTRL__MPRD_MODE_MASK |
		  UVD_CGC_CTRL__MPC_MODE_MASK |
		  UVD_CGC_CTRL__LBSI_MODE_MASK |
		  UVD_CGC_CTRL__LRBBM_MODE_MASK |
		  UVD_CGC_CTRL__WCB_MODE_MASK |
		  UVD_CGC_CTRL__VCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG_MODE_MASK |
		  UVD_CGC_CTRL__SCPU_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		    UVD_CGC_GATE__UDEC_MASK |
		    UVD_CGC_GATE__MPEG2_MASK |
		    UVD_CGC_GATE__RBC_MASK |
		    UVD_CGC_GATE__LMI_MC_MASK |
		    UVD_CGC_GATE__IDCT_MASK |
		    UVD_CGC_GATE__MPRD_MASK |
		    UVD_CGC_GATE__MPC_MASK |
		    UVD_CGC_GATE__LBSI_MASK |
		    UVD_CGC_GATE__LRBBM_MASK |
		    UVD_CGC_GATE__UDEC_RE_MASK |
		    UVD_CGC_GATE__UDEC_CM_MASK |
		    UVD_CGC_GATE__UDEC_IT_MASK |
		    UVD_CGC_GATE__UDEC_DB_MASK |
		    UVD_CGC_GATE__UDEC_MP_MASK |
		    UVD_CGC_GATE__WCB_MASK |
		    UVD_CGC_GATE__VCPU_MASK |
		    UVD_CGC_GATE__SCPU_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v5_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v5_0_enable_clock_gating(adev, true);

		/* enable HW gates because UVD is idle */
		/* uvd_v5_0_set_hw_clock_gating(adev); */
	} else {
		uvd_v5_0_enable_clock_gating(adev, false);
	}

	uvd_v5_0_set_sw_clock_gating(adev);
	return 0;
}

static int uvd_v5_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v5_0_stop(adev);
	} else {
		ret = uvd_v5_0_start(adev);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static void uvd_v5_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (RREG32_SMC(ixCURRENT_PG_STATUS) &
	    CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
	.name = "uvd_v5_0",
	.early_init = uvd_v5_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v5_0_sw_init,
	.sw_fini = uvd_v5_0_sw_fini,
	.hw_init = uvd_v5_0_hw_init,
	.hw_fini = uvd_v5_0_hw_fini,
	.suspend = uvd_v5_0_suspend,
	.resume = uvd_v5_0_resume,
	.is_idle = uvd_v5_0_is_idle,
	.wait_for_idle = uvd_v5_0_wait_for_idle,
	.soft_reset = uvd_v5_0_soft_reset,
	.set_clockgating_state = uvd_v5_0_set_clockgating_state,
	.set_powergating_state = uvd_v5_0_set_powergating_state,
	.get_clockgating_state = uvd_v5_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v5_0_ring_get_rptr,
	.get_wptr = uvd_v5_0_ring_get_wptr,
	.set_wptr = uvd_v5_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		14, /* uvd_v5_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
	.emit_ib = uvd_v5_0_ring_emit_ib,
	.emit_fence = uvd_v5_0_ring_emit_fence,
	.test_ring = uvd_v5_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v5_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->ring.funcs = &uvd_v5_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
	.set = uvd_v5_0_set_interrupt_state,
	.process = uvd_v5_0_process_interrupt,
};

static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->irq.num_types = 1;
	adev->uvd.inst->irq.funcs = &uvd_v5_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v5_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v5_0_ip_funcs,
};