/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v2_0.h"

#include "vcn/vcn_3_0_0_offset.h"
#include "vcn/vcn_3_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET	0x401f

static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v3_0_set_powergating_state(void *handle,
				enum amd_powergating_state state);

/**
 * jpeg_v3_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int jpeg_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type != CHIP_YELLOW_CARP) {
		u32 harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);

		if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
			return -ENOENT;
	}

	adev->jpeg.num_jpeg_inst = 1;

	jpeg_v3_0_set_dec_ring_funcs(adev);
	jpeg_v3_0_set_irq_funcs(adev);

	return 0;
}

/**
 * jpeg_v3_0_sw_init - sw init for JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int jpeg_v3_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r;

	/* JPEG TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
		VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
	if (r)
		return r;

	r = amdgpu_jpeg_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	ring = &adev->jpeg.inst->ring_dec;
	ring->use_doorbell = true;
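	/*
	 * The JPEG decode ring presumably shares the VCN doorbell aperture:
	 * the vcn_ring0_1 index appears to be in 64-bit doorbell units
	 * (hence the << 1), and the JPEG ring takes the slot right after
	 * the VCN decode ring's.
	 */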
	ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
	sprintf(ring->name, "jpeg_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
	adev->jpeg.inst->external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH);

	return 0;
}

/**
 * jpeg_v3_0_sw_fini - sw fini for JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * JPEG suspend and free up sw allocation
 */
static int jpeg_v3_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_jpeg_suspend(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_sw_fini(adev);

	return r;
}

/**
 * jpeg_v3_0_hw_init - start and test JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 */
static int jpeg_v3_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
	int r;

	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
		(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		return r;

	DRM_INFO("JPEG decode initialized successfully.\n");

	return 0;
}

/**
 * jpeg_v3_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the JPEG block, mark ring as not ready any more
 */
static int jpeg_v3_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
	    RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
		jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);

	return 0;
}

/**
 * jpeg_v3_0_suspend - suspend JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend JPEG block
 */
static int jpeg_v3_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = jpeg_v3_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_suspend(adev);

	return r;
}

/**
 * jpeg_v3_0_resume - resume JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init JPEG block
 */
static int jpeg_v3_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	r = jpeg_v3_0_hw_init(adev);

	return r;
}
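/*
 * Clock-gating helpers: disable_clock_gating() is called on start-up so the
 * JPEG engines run with gating off while the block is in use; the gate bits
 * are set again by enable_clock_gating() on stop, which presumably lets the
 * CGC switch the engine clocks off once the block is idle.
 */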
static void jpeg_v3_0_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
		| JPEG_CGC_GATE__JPEG2_DEC_MASK
		| JPEG_CGC_GATE__JPEG_ENC_MASK
		| JPEG_CGC_GATE__JMCIF_MASK
		| JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data);

	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL);
	data &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK
		| JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK
		| JPEG_CGC_CTRL__JMCIF_MODE_MASK
		| JPEG_CGC_CTRL__JRBBM_MODE_MASK);
	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data);
}

static void jpeg_v3_0_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
		| JPEG_CGC_GATE__JPEG2_DEC_MASK
		| JPEG_CGC_GATE__JPEG_ENC_MASK
		| JPEG_CGC_GATE__JMCIF_MASK
		| JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data);
}

static int jpeg_v3_0_disable_static_power_gating(struct amdgpu_device *adev)
{
	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		uint32_t data = 0;
		int r = 0;

		data = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
		WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);

		r = SOC15_WAIT_ON_RREG(JPEG, 0,
			mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON,
			UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);

		if (r) {
			DRM_ERROR("amdgpu: JPEG disable power gating failed\n");
			return r;
		}
	}

	/* disable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), 0,
		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	/* keep the JPEG in static PG mode */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), 0,
		~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);

	return 0;
}

static int jpeg_v3_0_enable_static_power_gating(struct amdgpu_device *adev)
{
	/* enable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS),
		UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		uint32_t data = 0;
		int r = 0;

		data = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
		WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);

		r = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_PGFSM_STATUS,
			(2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT),
			UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);

		if (r) {
			DRM_ERROR("amdgpu: JPEG enable power gating failed\n");
			return r;
		}
	}

	return 0;
}

/**
 * jpeg_v3_0_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block
 */
static int jpeg_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
	int r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_jpeg(adev, true);

	/* disable power gating */
	r = jpeg_v3_0_disable_static_power_gating(adev);
	if (r)
		return r;

	/* JPEG disable CGC */
	jpeg_v3_0_disable_clock_gating(adev);

	/* MJPEG global tiling registers */
	WREG32_SOC15(JPEG, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);
	WREG32_SOC15(JPEG, 0, mmJPEG_ENC_GFX10_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);

	/* enable JMI channel */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL), 0,
		~UVD_JMI_CNTL__SOFT_RESET_MASK);

	/* enable System Interrupt for JRBC */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmJPEG_SYS_INT_EN),
		JPEG_SYS_INT_EN__DJRBC_MASK,
		~JPEG_SYS_INT_EN__DJRBC_MASK);
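	/*
	 * Program the JPEG ring buffer. Bits 0 and 1 of UVD_JRBC_RB_CNTL
	 * presumably stall command fetching and allow the read pointer to
	 * be written while the base address and pointers are set up;
	 * writing 0x2 alone afterwards re-enables fetching.
	 */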
	WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
	WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR, 0);
	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, 0);
	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
	ring->wptr = RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);

	return 0;
}

/**
 * jpeg_v3_0_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the JPEG block
 */
static int jpeg_v3_0_stop(struct amdgpu_device *adev)
{
	int r;

	/* reset JMI */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL),
		UVD_JMI_CNTL__SOFT_RESET_MASK,
		~UVD_JMI_CNTL__SOFT_RESET_MASK);

	jpeg_v3_0_enable_clock_gating(adev);

	/* enable power gating */
	r = jpeg_v3_0_enable_static_power_gating(adev);
	if (r)
		return r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_jpeg(adev, false);

	return 0;
}

/**
 * jpeg_v3_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t jpeg_v3_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR);
}

/**
 * jpeg_v3_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t jpeg_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
}

/**
 * jpeg_v3_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void jpeg_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}
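/*
 * Idle handling: the block is treated as idle once the RB_JOB_DONE bit is
 * set in UVD_JRBC_STATUS, i.e. the ring buffer controller has presumably
 * finished executing all submitted jobs.
 */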
static bool jpeg_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 1;

	ret &= (((RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS) &
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK));

	return ret;
}

static int jpeg_v3_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS,
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}

static int jpeg_v3_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = state == AMD_CG_STATE_GATE;

	if (enable) {
		if (!jpeg_v3_0_is_idle(handle))
			return -EBUSY;
		jpeg_v3_0_enable_clock_gating(adev);
	} else {
		jpeg_v3_0_disable_clock_gating(adev);
	}

	return 0;
}

static int jpeg_v3_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (state == adev->jpeg.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = jpeg_v3_0_stop(adev);
	else
		ret = jpeg_v3_0_start(adev);

	if (!ret)
		adev->jpeg.cur_state = state;

	return ret;
}

static int jpeg_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *source,
					 unsigned type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}

static int jpeg_v3_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: JPEG TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst->ring_dec);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = {
	.name = "jpeg_v3_0",
	.early_init = jpeg_v3_0_early_init,
	.late_init = NULL,
	.sw_init = jpeg_v3_0_sw_init,
	.sw_fini = jpeg_v3_0_sw_fini,
	.hw_init = jpeg_v3_0_hw_init,
	.hw_fini = jpeg_v3_0_hw_fini,
	.suspend = jpeg_v3_0_suspend,
	.resume = jpeg_v3_0_resume,
	.is_idle = jpeg_v3_0_is_idle,
	.wait_for_idle = jpeg_v3_0_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = jpeg_v3_0_set_clockgating_state,
	.set_powergating_state = jpeg_v3_0_set_powergating_state,
};
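/*
 * The JPEG 3.0 decode ring reuses the packet emission helpers from JPEG 2.0
 * (emit_ib, emit_fence, emit_vm_flush, etc.); the command format presumably
 * did not change between the two versions, so only the register offsets and
 * the IP-level hooks above are version specific.
 */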
static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = jpeg_v3_0_dec_ring_get_rptr,
	.get_wptr = jpeg_v3_0_dec_ring_get_wptr,
	.set_wptr = jpeg_v3_0_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* jpeg_v3_0_dec_ring_emit_vm_flush */
		18 + 18 + /* jpeg_v3_0_dec_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* jpeg_v3_0_dec_ring_emit_ib */
	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
	.insert_nop = jpeg_v2_0_dec_ring_nop,
	.insert_start = jpeg_v2_0_dec_ring_insert_start,
	.insert_end = jpeg_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_jpeg_ring_begin_use,
	.end_use = amdgpu_jpeg_ring_end_use,
	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->jpeg.inst->ring_dec.funcs = &jpeg_v3_0_dec_ring_vm_funcs;
	DRM_INFO("JPEG decode is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs jpeg_v3_0_irq_funcs = {
	.set = jpeg_v3_0_set_interrupt_state,
	.process = jpeg_v3_0_process_interrupt,
};

static void jpeg_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->jpeg.inst->irq.num_types = 1;
	adev->jpeg.inst->irq.funcs = &jpeg_v3_0_irq_funcs;
}

const struct amdgpu_ip_block_version jpeg_v3_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_JPEG,
	.major = 3,
	.minor = 0,
	.rev = 0,
	.funcs = &jpeg_v3_0_ip_funcs,
};