1 /* 2 * Copyright 2019 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #include "amdgpu.h" 25 #include "amdgpu_jpeg.h" 26 #include "soc15.h" 27 #include "soc15d.h" 28 #include "jpeg_v2_0.h" 29 #include "jpeg_v2_5.h" 30 31 #include "vcn/vcn_2_5_offset.h" 32 #include "vcn/vcn_2_5_sh_mask.h" 33 #include "ivsrcid/vcn/irqsrcs_vcn_2_0.h" 34 35 #define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f 36 37 #define JPEG25_MAX_HW_INSTANCES_ARCTURUS 2 38 39 static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev); 40 static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev); 41 static int jpeg_v2_5_set_powergating_state(void *handle, 42 enum amd_powergating_state state); 43 static void jpeg_v2_5_set_ras_funcs(struct amdgpu_device *adev); 44 45 static int amdgpu_ih_clientid_jpeg[] = { 46 SOC15_IH_CLIENTID_VCN, 47 SOC15_IH_CLIENTID_VCN1 48 }; 49 50 /** 51 * jpeg_v2_5_early_init - set function pointers 52 * 53 * @handle: amdgpu_device pointer 54 * 55 * Set ring and irq function pointers 56 */ 57 static int jpeg_v2_5_early_init(void *handle) 58 { 59 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 60 u32 harvest; 61 int i; 62 63 adev->jpeg.num_jpeg_inst = JPEG25_MAX_HW_INSTANCES_ARCTURUS; 64 for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) { 65 harvest = RREG32_SOC15(JPEG, i, mmCC_UVD_HARVESTING); 66 if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK) 67 adev->jpeg.harvest_config |= 1 << i; 68 } 69 if (adev->jpeg.harvest_config == (AMDGPU_JPEG_HARVEST_JPEG0 | 70 AMDGPU_JPEG_HARVEST_JPEG1)) 71 return -ENOENT; 72 73 jpeg_v2_5_set_dec_ring_funcs(adev); 74 jpeg_v2_5_set_irq_funcs(adev); 75 jpeg_v2_5_set_ras_funcs(adev); 76 77 return 0; 78 } 79 80 /** 81 * jpeg_v2_5_sw_init - sw init for JPEG block 82 * 83 * @handle: amdgpu_device pointer 84 * 85 * Load firmware and sw initialization 86 */ 87 static int jpeg_v2_5_sw_init(void *handle) 88 { 89 struct amdgpu_ring *ring; 90 int i, r; 91 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 92 93 for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { 94 if (adev->jpeg.harvest_config & (1 << i)) 95 continue; 96 97 /* JPEG TRAP */ 98 r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i], 99 VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst[i].irq); 100 if (r) 101 return r; 102 103 /* JPEG DJPEG POISON EVENT */ 104 r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i], 105 VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].ras_poison_irq); 106 if (r) 107 return r; 108 109 /* JPEG EJPEG POISON EVENT */ 110 r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i], 111 VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].ras_poison_irq); 112 if (r) 113 return r; 114 } 115 116 r = amdgpu_jpeg_sw_init(adev); 117 if (r) 118 return r; 119 120 r = amdgpu_jpeg_resume(adev); 121 if (r) 122 return r; 123 124 for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { 125 if (adev->jpeg.harvest_config & (1 << i)) 126 continue; 127 128 ring = &adev->jpeg.inst[i].ring_dec; 129 ring->use_doorbell = true; 130 if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0)) 131 ring->vm_hub = AMDGPU_MMHUB_1; 132 else 133 ring->vm_hub = AMDGPU_MMHUB_0; 134 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i; 135 sprintf(ring->name, "jpeg_dec_%d", i); 136 r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq, 137 0, AMDGPU_RING_PRIO_DEFAULT, NULL); 138 if (r) 139 return r; 140 141 adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET; 142 adev->jpeg.inst[i].external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_PITCH); 143 } 144 145 r = amdgpu_jpeg_ras_sw_init(adev); 146 if (r) 147 return r; 148 149 return 0; 150 } 151 152 /** 153 * jpeg_v2_5_sw_fini - sw fini for JPEG block 154 * 155 * @handle: amdgpu_device pointer 156 * 157 * JPEG suspend and free up sw allocation 158 */ 159 static int jpeg_v2_5_sw_fini(void *handle) 160 { 161 int r; 162 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 163 164 r = amdgpu_jpeg_suspend(adev); 165 if (r) 166 return r; 167 168 r = amdgpu_jpeg_sw_fini(adev); 169 170 return r; 171 } 172 173 /** 174 * jpeg_v2_5_hw_init - start and test JPEG block 175 * 176 * @handle: amdgpu_device pointer 177 * 178 */ 179 static int jpeg_v2_5_hw_init(void *handle) 180 { 181 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 182 struct amdgpu_ring *ring; 183 int i, r; 184 185 for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { 186 if (adev->jpeg.harvest_config & (1 << i)) 187 continue; 188 189 ring = &adev->jpeg.inst[i].ring_dec; 190 adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, 191 (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i, i); 192 193 r = amdgpu_ring_test_helper(ring); 194 if (r) 195 return r; 196 } 197 198 DRM_INFO("JPEG decode initialized successfully.\n"); 199 200 return 0; 201 } 202 203 /** 204 * jpeg_v2_5_hw_fini - stop the hardware block 205 * 206 * @handle: amdgpu_device pointer 207 * 208 * Stop the JPEG block, mark ring as not ready any more 209 */ 210 static int jpeg_v2_5_hw_fini(void *handle) 211 { 212 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 213 int i; 214 215 cancel_delayed_work_sync(&adev->vcn.idle_work); 216 217 for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { 218 if (adev->jpeg.harvest_config & (1 << i)) 219 continue; 220 221 if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && 222 RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS)) 223 jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE); 224 225 if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) 226 amdgpu_irq_put(adev, &adev->jpeg.inst[i].ras_poison_irq, 0); 227 } 228 229 return 0; 230 } 231 232 /** 233 * jpeg_v2_5_suspend - suspend JPEG block 234 * 235 * @handle: amdgpu_device pointer 236 * 237 * HW fini and suspend JPEG block 238 */ 239 static int jpeg_v2_5_suspend(void *handle) 240 { 241 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 242 int r; 243 244 r = jpeg_v2_5_hw_fini(adev); 245 if (r) 246 return r; 247 248 r = amdgpu_jpeg_suspend(adev); 249 250 return r; 251 } 252 253 /** 254 * jpeg_v2_5_resume - resume JPEG block 255 * 256 * @handle: amdgpu_device pointer 257 * 258 * Resume firmware and hw init JPEG block 259 */ 260 static int jpeg_v2_5_resume(void *handle) 261 { 262 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 263 int r; 264 265 r = amdgpu_jpeg_resume(adev); 266 if (r) 267 return r; 268 269 r = jpeg_v2_5_hw_init(adev); 270 271 return r; 272 } 273 274 static void jpeg_v2_5_disable_clock_gating(struct amdgpu_device *adev, int inst) 275 { 276 uint32_t data; 277 278 data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL); 279 if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) 280 data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; 281 else 282 data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; 283 284 data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; 285 data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; 286 WREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL, data); 287 288 data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE); 289 data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK 290 | JPEG_CGC_GATE__JPEG2_DEC_MASK 291 | JPEG_CGC_GATE__JMCIF_MASK 292 | JPEG_CGC_GATE__JRBBM_MASK); 293 WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data); 294 295 data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL); 296 data &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK 297 | JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK 298 | JPEG_CGC_CTRL__JMCIF_MODE_MASK 299 | JPEG_CGC_CTRL__JRBBM_MODE_MASK); 300 WREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL, data); 301 } 302 303 static void jpeg_v2_5_enable_clock_gating(struct amdgpu_device *adev, int inst) 304 { 305 uint32_t data; 306 307 data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE); 308 data |= (JPEG_CGC_GATE__JPEG_DEC_MASK 309 |JPEG_CGC_GATE__JPEG2_DEC_MASK 310 |JPEG_CGC_GATE__JPEG_ENC_MASK 311 |JPEG_CGC_GATE__JMCIF_MASK 312 |JPEG_CGC_GATE__JRBBM_MASK); 313 WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data); 314 } 315 316 /** 317 * jpeg_v2_5_start - start JPEG block 318 * 319 * @adev: amdgpu_device pointer 320 * 321 * Setup and start the JPEG block 322 */ 323 static int jpeg_v2_5_start(struct amdgpu_device *adev) 324 { 325 struct amdgpu_ring *ring; 326 int i; 327 328 for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { 329 if (adev->jpeg.harvest_config & (1 << i)) 330 continue; 331 332 ring = &adev->jpeg.inst[i].ring_dec; 333 /* disable anti hang mechanism */ 334 WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS), 0, 335 ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); 336 337 /* JPEG disable CGC */ 338 jpeg_v2_5_disable_clock_gating(adev, i); 339 340 /* MJPEG global tiling registers */ 341 WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX8_ADDR_CONFIG, 342 adev->gfx.config.gb_addr_config); 343 WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX10_ADDR_CONFIG, 344 adev->gfx.config.gb_addr_config); 345 346 /* enable JMI channel */ 347 WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL), 0, 348 ~UVD_JMI_CNTL__SOFT_RESET_MASK); 349 350 /* enable System Interrupt for JRBC */ 351 WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmJPEG_SYS_INT_EN), 352 JPEG_SYS_INT_EN__DJRBC_MASK, 353 ~JPEG_SYS_INT_EN__DJRBC_MASK); 354 355 WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_VMID, 0); 356 WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L)); 357 WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, 358 lower_32_bits(ring->gpu_addr)); 359 WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, 360 upper_32_bits(ring->gpu_addr)); 361 WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_RPTR, 0); 362 WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR, 0); 363 WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, 0x00000002L); 364 WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4); 365 ring->wptr = RREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR); 366 } 367 368 return 0; 369 } 370 371 /** 372 * jpeg_v2_5_stop - stop JPEG block 373 * 374 * @adev: amdgpu_device pointer 375 * 376 * stop the JPEG block 377 */ 378 static int jpeg_v2_5_stop(struct amdgpu_device *adev) 379 { 380 int i; 381 382 for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { 383 if (adev->jpeg.harvest_config & (1 << i)) 384 continue; 385 386 /* reset JMI */ 387 WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL), 388 UVD_JMI_CNTL__SOFT_RESET_MASK, 389 ~UVD_JMI_CNTL__SOFT_RESET_MASK); 390 391 jpeg_v2_5_enable_clock_gating(adev, i); 392 393 /* enable anti hang mechanism */ 394 WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS), 395 UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK, 396 ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); 397 } 398 399 return 0; 400 } 401 402 /** 403 * jpeg_v2_5_dec_ring_get_rptr - get read pointer 404 * 405 * @ring: amdgpu_ring pointer 406 * 407 * Returns the current hardware read pointer 408 */ 409 static uint64_t jpeg_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring) 410 { 411 struct amdgpu_device *adev = ring->adev; 412 413 return RREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_RPTR); 414 } 415 416 /** 417 * jpeg_v2_5_dec_ring_get_wptr - get write pointer 418 * 419 * @ring: amdgpu_ring pointer 420 * 421 * Returns the current hardware write pointer 422 */ 423 static uint64_t jpeg_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring) 424 { 425 struct amdgpu_device *adev = ring->adev; 426 427 if (ring->use_doorbell) 428 return *ring->wptr_cpu_addr; 429 else 430 return RREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_WPTR); 431 } 432 433 /** 434 * jpeg_v2_5_dec_ring_set_wptr - set write pointer 435 * 436 * @ring: amdgpu_ring pointer 437 * 438 * Commits the write pointer to the hardware 439 */ 440 static void jpeg_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring) 441 { 442 struct amdgpu_device *adev = ring->adev; 443 444 if (ring->use_doorbell) { 445 *ring->wptr_cpu_addr = lower_32_bits(ring->wptr); 446 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); 447 } else { 448 WREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); 449 } 450 } 451 452 /** 453 * jpeg_v2_6_dec_ring_insert_start - insert a start command 454 * 455 * @ring: amdgpu_ring pointer 456 * 457 * Write a start command to the ring. 458 */ 459 static void jpeg_v2_6_dec_ring_insert_start(struct amdgpu_ring *ring) 460 { 461 amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, 462 0, 0, PACKETJ_TYPE0)); 463 amdgpu_ring_write(ring, 0x6aa04); /* PCTL0_MMHUB_DEEPSLEEP_IB */ 464 465 amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 466 0, 0, PACKETJ_TYPE0)); 467 amdgpu_ring_write(ring, 0x80000000 | (1 << (ring->me * 2 + 14))); 468 } 469 470 /** 471 * jpeg_v2_6_dec_ring_insert_end - insert a end command 472 * 473 * @ring: amdgpu_ring pointer 474 * 475 * Write a end command to the ring. 476 */ 477 static void jpeg_v2_6_dec_ring_insert_end(struct amdgpu_ring *ring) 478 { 479 amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, 480 0, 0, PACKETJ_TYPE0)); 481 amdgpu_ring_write(ring, 0x6aa04); /* PCTL0_MMHUB_DEEPSLEEP_IB */ 482 483 amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 484 0, 0, PACKETJ_TYPE0)); 485 amdgpu_ring_write(ring, (1 << (ring->me * 2 + 14))); 486 } 487 488 static bool jpeg_v2_5_is_idle(void *handle) 489 { 490 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 491 int i, ret = 1; 492 493 for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { 494 if (adev->jpeg.harvest_config & (1 << i)) 495 continue; 496 497 ret &= (((RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS) & 498 UVD_JRBC_STATUS__RB_JOB_DONE_MASK) == 499 UVD_JRBC_STATUS__RB_JOB_DONE_MASK)); 500 } 501 502 return ret; 503 } 504 505 static int jpeg_v2_5_wait_for_idle(void *handle) 506 { 507 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 508 int i, ret; 509 510 for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { 511 if (adev->jpeg.harvest_config & (1 << i)) 512 continue; 513 514 ret = SOC15_WAIT_ON_RREG(JPEG, i, mmUVD_JRBC_STATUS, 515 UVD_JRBC_STATUS__RB_JOB_DONE_MASK, 516 UVD_JRBC_STATUS__RB_JOB_DONE_MASK); 517 if (ret) 518 return ret; 519 } 520 521 return 0; 522 } 523 524 static int jpeg_v2_5_set_clockgating_state(void *handle, 525 enum amd_clockgating_state state) 526 { 527 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 528 bool enable = (state == AMD_CG_STATE_GATE); 529 int i; 530 531 for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { 532 if (adev->jpeg.harvest_config & (1 << i)) 533 continue; 534 535 if (enable) { 536 if (!jpeg_v2_5_is_idle(handle)) 537 return -EBUSY; 538 jpeg_v2_5_enable_clock_gating(adev, i); 539 } else { 540 jpeg_v2_5_disable_clock_gating(adev, i); 541 } 542 } 543 544 return 0; 545 } 546 547 static int jpeg_v2_5_set_powergating_state(void *handle, 548 enum amd_powergating_state state) 549 { 550 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 551 int ret; 552 553 if(state == adev->jpeg.cur_state) 554 return 0; 555 556 if (state == AMD_PG_STATE_GATE) 557 ret = jpeg_v2_5_stop(adev); 558 else 559 ret = jpeg_v2_5_start(adev); 560 561 if(!ret) 562 adev->jpeg.cur_state = state; 563 564 return ret; 565 } 566 567 static int jpeg_v2_5_set_interrupt_state(struct amdgpu_device *adev, 568 struct amdgpu_irq_src *source, 569 unsigned type, 570 enum amdgpu_interrupt_state state) 571 { 572 return 0; 573 } 574 575 static int jpeg_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev, 576 struct amdgpu_irq_src *source, 577 unsigned int type, 578 enum amdgpu_interrupt_state state) 579 { 580 return 0; 581 } 582 583 static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev, 584 struct amdgpu_irq_src *source, 585 struct amdgpu_iv_entry *entry) 586 { 587 uint32_t ip_instance; 588 589 switch (entry->client_id) { 590 case SOC15_IH_CLIENTID_VCN: 591 ip_instance = 0; 592 break; 593 case SOC15_IH_CLIENTID_VCN1: 594 ip_instance = 1; 595 break; 596 default: 597 DRM_ERROR("Unhandled client id: %d\n", entry->client_id); 598 return 0; 599 } 600 601 DRM_DEBUG("IH: JPEG TRAP\n"); 602 603 switch (entry->src_id) { 604 case VCN_2_0__SRCID__JPEG_DECODE: 605 amdgpu_fence_process(&adev->jpeg.inst[ip_instance].ring_dec); 606 break; 607 default: 608 DRM_ERROR("Unhandled interrupt: %d %d\n", 609 entry->src_id, entry->src_data[0]); 610 break; 611 } 612 613 return 0; 614 } 615 616 static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = { 617 .name = "jpeg_v2_5", 618 .early_init = jpeg_v2_5_early_init, 619 .late_init = NULL, 620 .sw_init = jpeg_v2_5_sw_init, 621 .sw_fini = jpeg_v2_5_sw_fini, 622 .hw_init = jpeg_v2_5_hw_init, 623 .hw_fini = jpeg_v2_5_hw_fini, 624 .suspend = jpeg_v2_5_suspend, 625 .resume = jpeg_v2_5_resume, 626 .is_idle = jpeg_v2_5_is_idle, 627 .wait_for_idle = jpeg_v2_5_wait_for_idle, 628 .check_soft_reset = NULL, 629 .pre_soft_reset = NULL, 630 .soft_reset = NULL, 631 .post_soft_reset = NULL, 632 .set_clockgating_state = jpeg_v2_5_set_clockgating_state, 633 .set_powergating_state = jpeg_v2_5_set_powergating_state, 634 }; 635 636 static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = { 637 .name = "jpeg_v2_6", 638 .early_init = jpeg_v2_5_early_init, 639 .late_init = NULL, 640 .sw_init = jpeg_v2_5_sw_init, 641 .sw_fini = jpeg_v2_5_sw_fini, 642 .hw_init = jpeg_v2_5_hw_init, 643 .hw_fini = jpeg_v2_5_hw_fini, 644 .suspend = jpeg_v2_5_suspend, 645 .resume = jpeg_v2_5_resume, 646 .is_idle = jpeg_v2_5_is_idle, 647 .wait_for_idle = jpeg_v2_5_wait_for_idle, 648 .check_soft_reset = NULL, 649 .pre_soft_reset = NULL, 650 .soft_reset = NULL, 651 .post_soft_reset = NULL, 652 .set_clockgating_state = jpeg_v2_5_set_clockgating_state, 653 .set_powergating_state = jpeg_v2_5_set_powergating_state, 654 }; 655 656 static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = { 657 .type = AMDGPU_RING_TYPE_VCN_JPEG, 658 .align_mask = 0xf, 659 .get_rptr = jpeg_v2_5_dec_ring_get_rptr, 660 .get_wptr = jpeg_v2_5_dec_ring_get_wptr, 661 .set_wptr = jpeg_v2_5_dec_ring_set_wptr, 662 .emit_frame_size = 663 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + 664 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + 665 8 + /* jpeg_v2_5_dec_ring_emit_vm_flush */ 666 18 + 18 + /* jpeg_v2_5_dec_ring_emit_fence x2 vm fence */ 667 8 + 16, 668 .emit_ib_size = 22, /* jpeg_v2_5_dec_ring_emit_ib */ 669 .emit_ib = jpeg_v2_0_dec_ring_emit_ib, 670 .emit_fence = jpeg_v2_0_dec_ring_emit_fence, 671 .emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush, 672 .test_ring = amdgpu_jpeg_dec_ring_test_ring, 673 .test_ib = amdgpu_jpeg_dec_ring_test_ib, 674 .insert_nop = jpeg_v2_0_dec_ring_nop, 675 .insert_start = jpeg_v2_0_dec_ring_insert_start, 676 .insert_end = jpeg_v2_0_dec_ring_insert_end, 677 .pad_ib = amdgpu_ring_generic_pad_ib, 678 .begin_use = amdgpu_jpeg_ring_begin_use, 679 .end_use = amdgpu_jpeg_ring_end_use, 680 .emit_wreg = jpeg_v2_0_dec_ring_emit_wreg, 681 .emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait, 682 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, 683 }; 684 685 static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = { 686 .type = AMDGPU_RING_TYPE_VCN_JPEG, 687 .align_mask = 0xf, 688 .get_rptr = jpeg_v2_5_dec_ring_get_rptr, 689 .get_wptr = jpeg_v2_5_dec_ring_get_wptr, 690 .set_wptr = jpeg_v2_5_dec_ring_set_wptr, 691 .emit_frame_size = 692 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + 693 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + 694 8 + /* jpeg_v2_5_dec_ring_emit_vm_flush */ 695 18 + 18 + /* jpeg_v2_5_dec_ring_emit_fence x2 vm fence */ 696 8 + 16, 697 .emit_ib_size = 22, /* jpeg_v2_5_dec_ring_emit_ib */ 698 .emit_ib = jpeg_v2_0_dec_ring_emit_ib, 699 .emit_fence = jpeg_v2_0_dec_ring_emit_fence, 700 .emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush, 701 .test_ring = amdgpu_jpeg_dec_ring_test_ring, 702 .test_ib = amdgpu_jpeg_dec_ring_test_ib, 703 .insert_nop = jpeg_v2_0_dec_ring_nop, 704 .insert_start = jpeg_v2_6_dec_ring_insert_start, 705 .insert_end = jpeg_v2_6_dec_ring_insert_end, 706 .pad_ib = amdgpu_ring_generic_pad_ib, 707 .begin_use = amdgpu_jpeg_ring_begin_use, 708 .end_use = amdgpu_jpeg_ring_end_use, 709 .emit_wreg = jpeg_v2_0_dec_ring_emit_wreg, 710 .emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait, 711 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, 712 }; 713 714 static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev) 715 { 716 int i; 717 718 for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { 719 if (adev->jpeg.harvest_config & (1 << i)) 720 continue; 721 if (adev->asic_type == CHIP_ARCTURUS) 722 adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_5_dec_ring_vm_funcs; 723 else /* CHIP_ALDEBARAN */ 724 adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_6_dec_ring_vm_funcs; 725 adev->jpeg.inst[i].ring_dec.me = i; 726 DRM_INFO("JPEG(%d) JPEG decode is enabled in VM mode\n", i); 727 } 728 } 729 730 static const struct amdgpu_irq_src_funcs jpeg_v2_5_irq_funcs = { 731 .set = jpeg_v2_5_set_interrupt_state, 732 .process = jpeg_v2_5_process_interrupt, 733 }; 734 735 static const struct amdgpu_irq_src_funcs jpeg_v2_6_ras_irq_funcs = { 736 .set = jpeg_v2_6_set_ras_interrupt_state, 737 .process = amdgpu_jpeg_process_poison_irq, 738 }; 739 740 static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev) 741 { 742 int i; 743 744 for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { 745 if (adev->jpeg.harvest_config & (1 << i)) 746 continue; 747 748 adev->jpeg.inst[i].irq.num_types = 1; 749 adev->jpeg.inst[i].irq.funcs = &jpeg_v2_5_irq_funcs; 750 751 adev->jpeg.inst[i].ras_poison_irq.num_types = 1; 752 adev->jpeg.inst[i].ras_poison_irq.funcs = &jpeg_v2_6_ras_irq_funcs; 753 } 754 } 755 756 const struct amdgpu_ip_block_version jpeg_v2_5_ip_block = 757 { 758 .type = AMD_IP_BLOCK_TYPE_JPEG, 759 .major = 2, 760 .minor = 5, 761 .rev = 0, 762 .funcs = &jpeg_v2_5_ip_funcs, 763 }; 764 765 const struct amdgpu_ip_block_version jpeg_v2_6_ip_block = 766 { 767 .type = AMD_IP_BLOCK_TYPE_JPEG, 768 .major = 2, 769 .minor = 6, 770 .rev = 0, 771 .funcs = &jpeg_v2_6_ip_funcs, 772 }; 773 774 static uint32_t jpeg_v2_6_query_poison_by_instance(struct amdgpu_device *adev, 775 uint32_t instance, uint32_t sub_block) 776 { 777 uint32_t poison_stat = 0, reg_value = 0; 778 779 switch (sub_block) { 780 case AMDGPU_JPEG_V2_6_JPEG0: 781 reg_value = RREG32_SOC15(JPEG, instance, mmUVD_RAS_JPEG0_STATUS); 782 poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG0_STATUS, POISONED_PF); 783 break; 784 case AMDGPU_JPEG_V2_6_JPEG1: 785 reg_value = RREG32_SOC15(JPEG, instance, mmUVD_RAS_JPEG1_STATUS); 786 poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG1_STATUS, POISONED_PF); 787 break; 788 default: 789 break; 790 } 791 792 if (poison_stat) 793 dev_info(adev->dev, "Poison detected in JPEG%d sub_block%d\n", 794 instance, sub_block); 795 796 return poison_stat; 797 } 798 799 static bool jpeg_v2_6_query_ras_poison_status(struct amdgpu_device *adev) 800 { 801 uint32_t inst = 0, sub = 0, poison_stat = 0; 802 803 for (inst = 0; inst < adev->jpeg.num_jpeg_inst; inst++) 804 for (sub = 0; sub < AMDGPU_JPEG_V2_6_MAX_SUB_BLOCK; sub++) 805 poison_stat += 806 jpeg_v2_6_query_poison_by_instance(adev, inst, sub); 807 808 return !!poison_stat; 809 } 810 811 const struct amdgpu_ras_block_hw_ops jpeg_v2_6_ras_hw_ops = { 812 .query_poison_status = jpeg_v2_6_query_ras_poison_status, 813 }; 814 815 static struct amdgpu_jpeg_ras jpeg_v2_6_ras = { 816 .ras_block = { 817 .hw_ops = &jpeg_v2_6_ras_hw_ops, 818 .ras_late_init = amdgpu_jpeg_ras_late_init, 819 }, 820 }; 821 822 static void jpeg_v2_5_set_ras_funcs(struct amdgpu_device *adev) 823 { 824 switch (adev->ip_versions[JPEG_HWIP][0]) { 825 case IP_VERSION(2, 6, 0): 826 adev->jpeg.ras = &jpeg_v2_6_ras; 827 break; 828 default: 829 break; 830 } 831 } 832