1 /* 2 * Copyright 2019 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 
 *
 */

#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v2_0.h"
#include "jpeg_v2_5.h"

#include "vcn/vcn_2_5_offset.h"
#include "vcn/vcn_2_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

/* internal (ring-relative) register offset for the JPEG pitch register */
#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET			0x401f

#define JPEG25_MAX_HW_INSTANCES_ARCTURUS			2

static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v2_5_set_powergating_state(void *handle,
				enum amd_powergating_state state);
static void jpeg_v2_5_set_ras_funcs(struct amdgpu_device *adev);

/* IH client id per JPEG instance (instance 0 -> VCN, instance 1 -> VCN1) */
static int amdgpu_ih_clientid_jpeg[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

/**
 * jpeg_v2_5_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers.
 * Returns 0 on success, -ENOENT if every instance is harvested.
 */
static int jpeg_v2_5_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 harvest;
	int i;

	adev->jpeg.num_jpeg_inst = JPEG25_MAX_HW_INSTANCES_ARCTURUS;
	for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
		harvest = RREG32_SOC15(JPEG, i, mmCC_UVD_HARVESTING);
		if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
			adev->jpeg.harvest_config |= 1 << i;
	}
	/* both instances harvested: there is no usable JPEG block */
	if (adev->jpeg.harvest_config == (AMDGPU_JPEG_HARVEST_JPEG0 |
					  AMDGPU_JPEG_HARVEST_JPEG1))
		return -ENOENT;

	jpeg_v2_5_set_dec_ring_funcs(adev);
	jpeg_v2_5_set_irq_funcs(adev);
	jpeg_v2_5_set_ras_funcs(adev);

	return 0;
}

/**
 * jpeg_v2_5_sw_init - sw init for JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization: register the trap and RAS poison
 * interrupt sources, then create one doorbell-driven decode ring per
 * non-harvested instance.
 */
static int jpeg_v2_5_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		/* JPEG TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
				VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst[i].irq);
		if (r)
			return r;

		/* JPEG DJPEG POISON EVENT */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
			VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].irq);
		if (r)
			return r;

		/* JPEG EJPEG POISON EVENT */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
			VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].irq);
		if (r)
			return r;
	}

	r = amdgpu_jpeg_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		ring = &adev->jpeg.inst[i].ring_dec;
		ring->use_doorbell = true;
		/* doorbell layout: VCN ring0_1 base (in 32-bit doorbell units),
		 * +1 for the JPEG slot, 8 doorbells apart per instance */
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i;
		sprintf(ring->name, "jpeg_dec_%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq,
				     0, AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;

		adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
		adev->jpeg.inst[i].external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_PITCH);
	}

	r = amdgpu_jpeg_ras_sw_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * jpeg_v2_5_sw_fini - sw fini for JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * JPEG suspend and free up sw allocation
 */
static int jpeg_v2_5_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_jpeg_suspend(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_sw_fini(adev);

	return r;
}

/**
 * jpeg_v2_5_hw_init - start and test JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Program the doorbell range for each live instance and run the ring test.
 */
static int jpeg_v2_5_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		ring = &adev->jpeg.inst[i].ring_dec;
		adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
			(adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i, i);

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	DRM_INFO("JPEG decode initialized successfully.\n");

	return 0;
}

/**
 * jpeg_v2_5_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the JPEG block, mark ring as not ready any more
 */
static int jpeg_v2_5_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		/* only gate if not already gated and the ring block still
		 * reports activity (JRBC_STATUS non-zero) */
		if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
		      RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
			jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
	}

	return 0;
}

/**
 * jpeg_v2_5_suspend - suspend JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend JPEG block
 */
static int jpeg_v2_5_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = jpeg_v2_5_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_suspend(adev);

	return r;
}

/**
 * jpeg_v2_5_resume - resume JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init JPEG block
 */
static int jpeg_v2_5_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	r = jpeg_v2_5_hw_init(adev);

	return r;
}

/* Open the clocks for one instance: select dynamic/static clock mode per the
 * MGCG CG flag, program gate-delay timers, then ungate the decoder sub-blocks. */
static void jpeg_v2_5_disable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		/* NOTE(review): this masks with ~__SHIFT rather than
		 * ~(1 << __SHIFT) or the field __MASK; confirm against the
		 * JPEG_CGC_CTRL register layout whether the bit is actually
		 * cleared here. */
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
		| JPEG_CGC_GATE__JPEG2_DEC_MASK
		| JPEG_CGC_GATE__JMCIF_MASK
		| JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data);

	data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL);
	data &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK
		| JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK
		| JPEG_CGC_CTRL__JMCIF_MODE_MASK
		| JPEG_CGC_CTRL__JRBBM_MODE_MASK);
	WREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL, data);
}

/* Gate the clocks of every JPEG sub-block on one instance. */
static void jpeg_v2_5_enable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
		|JPEG_CGC_GATE__JPEG2_DEC_MASK
		|JPEG_CGC_GATE__JPEG_ENC_MASK
		|JPEG_CGC_GATE__JMCIF_MASK
		|JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data);
}

/**
 * jpeg_v2_5_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block: for each live instance, release the
 * anti-hang/power-status hold, open clocks, program tiling and the JRBC
 * ring buffer registers, and sync the software wptr with the hardware.
 */
static int jpeg_v2_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		ring = &adev->jpeg.inst[i].ring_dec;
		/* disable anti hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS), 0,
			~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

		/* JPEG disable CGC */
		jpeg_v2_5_disable_clock_gating(adev, i);

		/* MJPEG global tiling registers */
		WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
		WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX10_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* enable JMI channel */
		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL), 0,
			~UVD_JMI_CNTL__SOFT_RESET_MASK);

		/* enable System Interrupt for JRBC */
		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmJPEG_SYS_INT_EN),
			JPEG_SYS_INT_EN__DJRBC_MASK,
			~JPEG_SYS_INT_EN__DJRBC_MASK);

		WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_VMID, 0);
		/* NOTE(review): magic RB_CNTL bits (0x1 | 0x2) — symbolic
		 * UVD_JRBC_RB_CNTL field names would be clearer; confirm
		 * against the register spec. */
		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
		WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_RPTR, 0);
		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR, 0);
		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, 0x00000002L);
		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
		ring->wptr = RREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR);
	}

	return 0;
}

/**
 * jpeg_v2_5_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the JPEG block: soft-reset JMI, re-gate clocks and re-assert the
 * anti-hang/power-status hold on every live instance.
 */
static int jpeg_v2_5_stop(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		/* reset JMI */
		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL),
			UVD_JMI_CNTL__SOFT_RESET_MASK,
			~UVD_JMI_CNTL__SOFT_RESET_MASK);

		jpeg_v2_5_enable_clock_gating(adev, i);

		/* enable anti hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS),
			UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
			~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
	}

	return 0;
}

/**
 * jpeg_v2_5_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t jpeg_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_RPTR);
}

/**
 * jpeg_v2_5_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t jpeg_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_WPTR);
}

/**
 * jpeg_v2_5_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void jpeg_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* publish to the CPU-visible wptr first, then ring the doorbell */
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

/**
 * jpeg_v2_6_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void jpeg_v2_6_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x6aa04); /* PCTL0_MMHUB_DEEPSLEEP_IB */

	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
		0, 0, PACKETJ_TYPE0));
	/* top bit set = start; per-instance bit selected by ring->me */
	amdgpu_ring_write(ring, 0x80000000 | (1 << (ring->me * 2 + 14)));
}

/**
 * jpeg_v2_6_dec_ring_insert_end - insert a end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a end command to the ring.
 */
static void jpeg_v2_6_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x6aa04); /* PCTL0_MMHUB_DEEPSLEEP_IB */

	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, (1 << (ring->me * 2 + 14)));
}

/* True only when every live instance reports RB_JOB_DONE. */
static bool jpeg_v2_5_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		/* accumulate with &= so one busy instance makes the whole
		 * block report busy */
		ret &= (((RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS) &
			UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
			UVD_JRBC_STATUS__RB_JOB_DONE_MASK));
	}

	return ret;
}

/* Poll each live instance until RB_JOB_DONE is set (or timeout). */
static int jpeg_v2_5_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		ret = SOC15_WAIT_ON_RREG(JPEG, i, mmUVD_JRBC_STATUS,
			UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
			UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
		if (ret)
			return ret;
	}

	return 0;
}

/* Gate or ungate clocks on every live instance; refuses to gate while busy. */
static int jpeg_v2_5_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		if (enable) {
			if (!jpeg_v2_5_is_idle(handle))
				return -EBUSY;
			jpeg_v2_5_enable_clock_gating(adev, i);
		} else {
			jpeg_v2_5_disable_clock_gating(adev, i);
		}
	}

	return 0;
}

/* Transition the block between gated (stopped) and ungated (running);
 * caches the new state in adev->jpeg.cur_state on success. */
static int jpeg_v2_5_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if(state == adev->jpeg.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = jpeg_v2_5_stop(adev);
	else
		ret = jpeg_v2_5_start(adev);

	if(!ret)
		adev->jpeg.cur_state = state;

	return ret;
}

/* No per-type enable/disable needed; JRBC interrupts are enabled in start(). */
static int jpeg_v2_5_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

/* IH callback: route decode-done fences and RAS poison events per instance. */
static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: JPEG TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[ip_instance].ring_dec);
		break;
	case VCN_2_6__SRCID_DJPEG0_POISON:
	case VCN_2_6__SRCID_EJPEG0_POISON:
		amdgpu_jpeg_process_poison_irq(adev, source, entry);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
	.name = "jpeg_v2_5",
	.early_init = jpeg_v2_5_early_init,
	.late_init = NULL,
	.sw_init = jpeg_v2_5_sw_init,
	.sw_fini = jpeg_v2_5_sw_fini,
	.hw_init = jpeg_v2_5_hw_init,
	.hw_fini = jpeg_v2_5_hw_fini,
	.suspend = jpeg_v2_5_suspend,
	.resume = jpeg_v2_5_resume,
	.is_idle = jpeg_v2_5_is_idle,
	.wait_for_idle = jpeg_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = jpeg_v2_5_set_clockgating_state,
	.set_powergating_state = jpeg_v2_5_set_powergating_state,
};

/* v2.6 reuses every v2.5 IP callback; only the name differs. */
static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = {
	.name = "jpeg_v2_6",
	.early_init = jpeg_v2_5_early_init,
	.late_init = NULL,
	.sw_init = jpeg_v2_5_sw_init,
	.sw_fini = jpeg_v2_5_sw_fini,
	.hw_init = jpeg_v2_5_hw_init,
	.hw_fini = jpeg_v2_5_hw_fini,
	.suspend = jpeg_v2_5_suspend,
	.resume = jpeg_v2_5_resume,
	.is_idle = jpeg_v2_5_is_idle,
	.wait_for_idle = jpeg_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = jpeg_v2_5_set_clockgating_state,
	.set_powergating_state = jpeg_v2_5_set_powergating_state,
};

static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
	.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
	.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* jpeg_v2_5_dec_ring_emit_vm_flush */
		18 + 18 + /* jpeg_v2_5_dec_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* jpeg_v2_5_dec_ring_emit_ib */
	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
	.insert_nop = jpeg_v2_0_dec_ring_nop,
	.insert_start = jpeg_v2_0_dec_ring_insert_start,
	.insert_end = jpeg_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_jpeg_ring_begin_use,
	.end_use = amdgpu_jpeg_ring_end_use,
	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/* v2.6 differs from v2.5 in the vmhub (MMHUB_0) and its own
 * insert_start/insert_end packets. */
static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
	.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
	.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* jpeg_v2_5_dec_ring_emit_vm_flush */
		18 + 18 + /* jpeg_v2_5_dec_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* jpeg_v2_5_dec_ring_emit_ib */
	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
	.insert_nop = jpeg_v2_0_dec_ring_nop,
	.insert_start = jpeg_v2_6_dec_ring_insert_start,
	.insert_end = jpeg_v2_6_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_jpeg_ring_begin_use,
	.end_use = amdgpu_jpeg_ring_end_use,
	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/* Pick the ring-funcs table per ASIC (Arcturus = v2.5, otherwise v2.6). */
static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;
		if (adev->asic_type == CHIP_ARCTURUS)
			adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_5_dec_ring_vm_funcs;
		else  /* CHIP_ALDEBARAN */
			adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_6_dec_ring_vm_funcs;
		adev->jpeg.inst[i].ring_dec.me = i;
		DRM_INFO("JPEG(%d) JPEG decode is enabled in VM mode\n", i);
	}
}

static const struct amdgpu_irq_src_funcs jpeg_v2_5_irq_funcs = {
	.set = jpeg_v2_5_set_interrupt_state,
	.process = jpeg_v2_5_process_interrupt,
};

/* Install the irq callback table on every live instance. */
static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		adev->jpeg.inst[i].irq.num_types = 1;
		adev->jpeg.inst[i].irq.funcs = &jpeg_v2_5_irq_funcs;
	}
}

const struct amdgpu_ip_block_version jpeg_v2_5_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_JPEG,
		.major = 2,
		.minor = 5,
		.rev = 0,
		.funcs = &jpeg_v2_5_ip_funcs,
};

const struct amdgpu_ip_block_version jpeg_v2_6_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_JPEG,
		.major = 2,
		.minor = 6,
		.rev = 0,
		.funcs = &jpeg_v2_6_ip_funcs,
};

/* Read the RAS poison status bit for one sub-block of one instance;
 * logs when poison is detected and returns the (0/1) status. */
static uint32_t jpeg_v2_6_query_poison_by_instance(struct amdgpu_device *adev,
		uint32_t instance, uint32_t sub_block)
{
	uint32_t poison_stat = 0, reg_value = 0;

	switch (sub_block) {
	case AMDGPU_JPEG_V2_6_JPEG0:
		reg_value = RREG32_SOC15(JPEG, instance, mmUVD_RAS_JPEG0_STATUS);
		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG0_STATUS, POISONED_PF);
		break;
	case AMDGPU_JPEG_V2_6_JPEG1:
		reg_value = RREG32_SOC15(JPEG, instance, mmUVD_RAS_JPEG1_STATUS);
		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG1_STATUS, POISONED_PF);
		break;
	default:
		break;
	}

	if (poison_stat)
		dev_info(adev->dev, "Poison detected in JPEG%d sub_block%d\n",
			instance, sub_block);

	return poison_stat;
}

/* True if any sub-block of any instance reports RAS poison. */
static bool jpeg_v2_6_query_ras_poison_status(struct amdgpu_device *adev)
{
	uint32_t inst = 0, sub = 0, poison_stat = 0;

	for (inst = 0; inst < adev->jpeg.num_jpeg_inst; inst++)
		for (sub = 0; sub < AMDGPU_JPEG_V2_6_MAX_SUB_BLOCK; sub++)
			poison_stat +=
			jpeg_v2_6_query_poison_by_instance(adev, inst, sub);

	return !!poison_stat;
}

const struct amdgpu_ras_block_hw_ops jpeg_v2_6_ras_hw_ops = {
	.query_poison_status = jpeg_v2_6_query_ras_poison_status,
};

static struct amdgpu_jpeg_ras jpeg_v2_6_ras = {
	.ras_block = {
		.hw_ops = &jpeg_v2_6_ras_hw_ops,
	},
};

/* RAS support exists only on JPEG IP v2.6.0. */
static void jpeg_v2_5_set_ras_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[JPEG_HWIP][0]) {
	case IP_VERSION(2, 6, 0):
		adev->jpeg.ras = &jpeg_v2_6_ras;
		break;
	default:
		break;
	}
}