/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v4_0_3.h"

#include "vcn/vcn_4_0_3_offset.h"
#include "vcn/vcn_4_0_3_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"

enum jpeg_engine_status {
	UVD_PGFSM_STATUS__UVDJ_PWR_ON  = 0,
	UVD_PGFSM_STATUS__UVDJ_PWR_OFF = 2,
};

static void jpeg_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v4_0_3_set_powergating_state(void *handle,
				enum amd_powergating_state state);
static void jpeg_v4_0_3_set_ras_funcs(struct amdgpu_device *adev);

static int amdgpu_ih_srcid_jpeg[] = {
	VCN_4_0__SRCID__JPEG_DECODE,
	VCN_4_0__SRCID__JPEG1_DECODE,
	VCN_4_0__SRCID__JPEG2_DECODE,
	VCN_4_0__SRCID__JPEG3_DECODE,
	VCN_4_0__SRCID__JPEG4_DECODE,
	VCN_4_0__SRCID__JPEG5_DECODE,
	VCN_4_0__SRCID__JPEG6_DECODE,
	VCN_4_0__SRCID__JPEG7_DECODE
};

/**
 * jpeg_v4_0_3_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int jpeg_v4_0_3_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS;

	jpeg_v4_0_3_set_dec_ring_funcs(adev);
	jpeg_v4_0_3_set_irq_funcs(adev);
	jpeg_v4_0_3_set_ras_funcs(adev);

	return 0;
}

/**
 * jpeg_v4_0_3_sw_init - sw init for JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int jpeg_v4_0_3_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r, jpeg_inst;

	for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
		/* JPEG TRAP */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
				      amdgpu_ih_srcid_jpeg[j], &adev->jpeg.inst->irq);
		if (r)
			return r;
	}

	r = amdgpu_jpeg_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		jpeg_inst = GET_INST(JPEG, i);

		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			ring = &adev->jpeg.inst[i].ring_dec[j];
			ring->use_doorbell = true;
			ring->vm_hub = AMDGPU_MMHUB0(adev->jpeg.inst[i].aid_id);
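			/* Doorbell layout: one doorbell pair is shared with
			 * VCN (vcn_ring0_1); each JPEG ring then takes one
			 * slot after it, with nine slots reserved per JPEG
			 * instance.
			 */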
			ring->doorbell_index =
				(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
				1 + j + 9 * jpeg_inst;
			sprintf(ring->name, "jpeg_dec_%d.%d", adev->jpeg.inst[i].aid_id, j);
			r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;

			adev->jpeg.internal.jpeg_pitch[j] =
				regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET;
			adev->jpeg.inst[i].external.jpeg_pitch[j] =
				SOC15_REG_OFFSET1(
					JPEG, jpeg_inst,
					regUVD_JRBC0_UVD_JRBC_SCRATCH0,
					(j ? (0x40 * j - 0xc80) : 0));
		}
	}

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) {
		r = amdgpu_jpeg_ras_sw_init(adev);
		if (r) {
			dev_err(adev->dev, "Failed to initialize jpeg ras block!\n");
			return r;
		}
	}

	return 0;
}

/**
 * jpeg_v4_0_3_sw_fini - sw fini for JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * JPEG suspend and free up sw allocation
 */
static int jpeg_v4_0_3_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_jpeg_suspend(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_sw_fini(adev);

	return r;
}

/**
 * jpeg_v4_0_3_hw_init - start and test JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 */
static int jpeg_v4_0_3_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r, jpeg_inst;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		jpeg_inst = GET_INST(JPEG, i);

		ring = adev->jpeg.inst[i].ring_dec;

		if (ring->use_doorbell)
			adev->nbio.funcs->vcn_doorbell_range(
				adev, ring->use_doorbell,
				(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					9 * jpeg_inst,
				adev->jpeg.inst[i].aid_id);

		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			ring = &adev->jpeg.inst[i].ring_dec[j];
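			/* Pipes 1-7 use the VCN_JPEG_DB_CTRL1-7 registers,
			 * which are not contiguous with pipe 0's
			 * VCN_JPEG_DB_CTRL; the (pipe - 0x15) term compensates
			 * for that gap in the register map.
			 */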
			if (ring->use_doorbell)
				WREG32_SOC15_OFFSET(
					VCN, GET_INST(VCN, i),
					regVCN_JPEG_DB_CTRL,
					(ring->pipe ? (ring->pipe - 0x15) : 0),
					ring->doorbell_index
							<< VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
						VCN_JPEG_DB_CTRL__EN_MASK);
			r = amdgpu_ring_test_helper(ring);
			if (r)
				return r;
		}
	}
	DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully.\n");

	return 0;
}

/**
 * jpeg_v4_0_3_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the JPEG block, mark ring as not ready any more
 */
static int jpeg_v4_0_3_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	cancel_delayed_work_sync(&adev->jpeg.idle_work);

	if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
		ret = jpeg_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);

	return ret;
}

/**
 * jpeg_v4_0_3_suspend - suspend JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend JPEG block
 */
static int jpeg_v4_0_3_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = jpeg_v4_0_3_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_suspend(adev);

	return r;
}

/**
 * jpeg_v4_0_3_resume - resume JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init JPEG block
 */
static int jpeg_v4_0_3_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	r = jpeg_v4_0_3_hw_init(adev);

	return r;
}

static void jpeg_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst_idx)
{
	int i, jpeg_inst;
	uint32_t data;

	jpeg_inst = GET_INST(JPEG, inst_idx);
	data = RREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data &= (~(JPEG_CGC_CTRL__JPEG0_DEC_MODE_MASK << 1));
	} else {
		/* clear the bit, not the (zero) shift value, which would
		 * have been a no-op */
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	}

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JMCIF_MASK | JPEG_CGC_GATE__JRBBM_MASK);
	for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i)
		data &= ~(JPEG_CGC_GATE__JPEG0_DEC_MASK << i);
	WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_GATE, data);
}
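
/*
 * Mirror of jpeg_v4_0_3_disable_clock_gating(): re-gate the JMCIF, JRBBM
 * and per-ring decoder clocks once the block is quiesced.
 */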
static void jpeg_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_idx)
{
	int i, jpeg_inst;
	uint32_t data;

	jpeg_inst = GET_INST(JPEG, inst_idx);
	data = RREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data |= (JPEG_CGC_CTRL__JPEG0_DEC_MODE_MASK << 1);
	} else {
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	}

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JMCIF_MASK | JPEG_CGC_GATE__JRBBM_MASK);
	for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i)
		data |= (JPEG_CGC_GATE__JPEG0_DEC_MASK << i);
	WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_GATE, data);
}

/**
 * jpeg_v4_0_3_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block
 */
static int jpeg_v4_0_3_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i, j, jpeg_inst;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		jpeg_inst = GET_INST(JPEG, i);

		WREG32_SOC15(JPEG, jpeg_inst, regUVD_PGFSM_CONFIG,
			     1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(
			JPEG, jpeg_inst, regUVD_PGFSM_STATUS,
			UVD_PGFSM_STATUS__UVDJ_PWR_ON
				<< UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT,
			UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);

		/* disable anti hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst,
					  regUVD_JPEG_POWER_STATUS),
			 0, ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

		/* JPEG disable CGC */
		jpeg_v4_0_3_disable_clock_gating(adev, i);

		/* MJPEG global tiling registers */
		WREG32_SOC15(JPEG, jpeg_inst, regJPEG_DEC_GFX8_ADDR_CONFIG,
			     adev->gfx.config.gb_addr_config);
		WREG32_SOC15(JPEG, jpeg_inst, regJPEG_DEC_GFX10_ADDR_CONFIG,
			     adev->gfx.config.gb_addr_config);

		/* enable JMI channel */
		WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL), 0,
			 ~UVD_JMI_CNTL__SOFT_RESET_MASK);

		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			unsigned int reg_offset = (j ? (0x40 * j - 0xc80) : 0);

			ring = &adev->jpeg.inst[i].ring_dec[j];

			/* enable System Interrupt for JRBC */
			WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst,
						  regJPEG_SYS_INT_EN),
				 JPEG_SYS_INT_EN__DJRBC0_MASK << j,
				 ~(JPEG_SYS_INT_EN__DJRBC0_MASK << j));

			WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
					    regUVD_JMI0_UVD_LMI_JRBC_RB_VMID,
					    reg_offset, 0);
			WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
					    regUVD_JRBC0_UVD_JRBC_RB_CNTL,
					    reg_offset,
					    (0x00000001L | 0x00000002L));
			WREG32_SOC15_OFFSET(
				JPEG, jpeg_inst,
				regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW,
				reg_offset, lower_32_bits(ring->gpu_addr));
			WREG32_SOC15_OFFSET(
				JPEG, jpeg_inst,
				regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
				reg_offset, upper_32_bits(ring->gpu_addr));
			WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
					    regUVD_JRBC0_UVD_JRBC_RB_RPTR,
					    reg_offset, 0);
			WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
					    regUVD_JRBC0_UVD_JRBC_RB_WPTR,
					    reg_offset, 0);
			WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
					    regUVD_JRBC0_UVD_JRBC_RB_CNTL,
					    reg_offset, 0x00000002L);
			WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
					    regUVD_JRBC0_UVD_JRBC_RB_SIZE,
					    reg_offset, ring->ring_size / 4);
			ring->wptr = RREG32_SOC15_OFFSET(
				JPEG, jpeg_inst, regUVD_JRBC0_UVD_JRBC_RB_WPTR,
				reg_offset);
		}
	}

	return 0;
}

/**
 * jpeg_v4_0_3_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the JPEG block
 */
static int jpeg_v4_0_3_stop(struct amdgpu_device *adev)
{
	int i, jpeg_inst;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		jpeg_inst = GET_INST(JPEG, i);
		/* reset JMI */
		WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL),
			 UVD_JMI_CNTL__SOFT_RESET_MASK,
			 ~UVD_JMI_CNTL__SOFT_RESET_MASK);

		jpeg_v4_0_3_enable_clock_gating(adev, i);

		/* enable anti hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst,
					  regUVD_JPEG_POWER_STATUS),
			 UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
			 ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
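
		/* Power the JPEG island down via the PGFSM (state 2 ==
		 * UVDJ_PWR_OFF) and wait for the status to latch.
		 */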
		WREG32_SOC15(JPEG, jpeg_inst, regUVD_PGFSM_CONFIG,
			     2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(
			JPEG, jpeg_inst, regUVD_PGFSM_STATUS,
			UVD_PGFSM_STATUS__UVDJ_PWR_OFF
				<< UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT,
			UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
	}

	return 0;
}
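
/*
 * Ring 0 uses the UVD_JRBC0_* register block as-is; rings 1-7 share an
 * identically laid out block spaced 0x40 registers apart at a lower base,
 * which is what the (0x40 * pipe - 0xc80) offset below selects.
 */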

/**
 * jpeg_v4_0_3_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t jpeg_v4_0_3_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15_OFFSET(
		JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC0_UVD_JRBC_RB_RPTR,
		ring->pipe ? (0x40 * ring->pipe - 0xc80) : 0);
}

/**
 * jpeg_v4_0_3_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t jpeg_v4_0_3_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15_OFFSET(
			JPEG, GET_INST(JPEG, ring->me),
			regUVD_JRBC0_UVD_JRBC_RB_WPTR,
			ring->pipe ? (0x40 * ring->pipe - 0xc80) : 0);
}

/**
 * jpeg_v4_0_3_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void jpeg_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me),
				    regUVD_JRBC0_UVD_JRBC_RB_WPTR,
				    (ring->pipe ? (0x40 * ring->pipe - 0xc80) :
						  0),
				    lower_32_bits(ring->wptr));
	}
}

/**
 * jpeg_v4_0_3_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x62a04); /* PCTL0_MMHUB_DEEPSLEEP_IB */

	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x80004000);
}

/**
 * jpeg_v4_0_3_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
static void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x62a04);

	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x00004000);
}

/**
 * jpeg_v4_0_3_dec_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
					    unsigned int flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(addr));

	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(addr));

	amdgpu_ring_write(ring, PACKETJ(regUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
		0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
	amdgpu_ring_write(ring, 0);
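
	/* On a secondary AID, redirect the following register write through
	 * the external MCM address; on AID 0 emit a two-dword NOP instead so
	 * the fence packet stays the same size either way.
	 */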
	if (ring->adev->jpeg.inst[ring->me].aid_id) {
		amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_MCM_ADDR_INTERNAL_OFFSET,
			0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE0));
		amdgpu_ring_write(ring, 0x4);
	} else {
		amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
		amdgpu_ring_write(ring, 0);
	}

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x3fbc);

	if (ring->adev->jpeg.inst[ring->me].aid_id) {
		amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_MCM_ADDR_INTERNAL_OFFSET,
			0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE0));
		amdgpu_ring_write(ring, 0x0);
	} else {
		amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
		amdgpu_ring_write(ring, 0);
	}

	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x1);

	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
	amdgpu_ring_write(ring, 0);
}

/**
 * jpeg_v4_0_3_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer.
 */
static void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
					 struct amdgpu_job *job,
					 struct amdgpu_ib *ib,
					 uint32_t flags)
{
	unsigned int vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, (vmid | (vmid << 4)));

	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, (vmid | (vmid << 4)));

	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));

	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_IB_SIZE_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, ib->length_dw);

	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));

	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));

	amdgpu_ring_write(ring, PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x2);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_STATUS_INTERNAL_OFFSET,
		0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
	amdgpu_ring_write(ring, 0x2);
}
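
/*
 * The helpers below reach registers in one of two ways: byte offsets in the
 * 0x10000-0x105ff window are addressed directly as JRBC internal registers,
 * everything else goes indirectly through JRBC_DEC_EXTERNAL_REG_WRITE_ADDR.
 */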

static void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					       uint32_t val, uint32_t mask)
{
	uint32_t reg_offset = (reg << 2);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, val);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring,
			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
	} else {
		amdgpu_ring_write(ring, reg_offset);
		amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
			0, 0, PACKETJ_TYPE3));
	}
	amdgpu_ring_write(ring, mask);
}

static void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
					       unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	jpeg_v4_0_3_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}

static void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
	uint32_t reg_offset = (reg << 2);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring,
			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
	} else {
		amdgpu_ring_write(ring, reg_offset);
		amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
			0, 0, PACKETJ_TYPE0));
	}
	amdgpu_ring_write(ring, val);
}

static void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
		amdgpu_ring_write(ring, 0);
	}
}

static bool jpeg_v4_0_3_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* start from true and and-in each ring's status; starting from
	 * false would make this unconditionally report busy */
	bool ret = true;
	int i, j;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			unsigned int reg_offset = (j ? (0x40 * j - 0xc80) : 0);

			ret &= ((RREG32_SOC15_OFFSET(
					 JPEG, GET_INST(JPEG, i),
					 regUVD_JRBC0_UVD_JRBC_STATUS,
					 reg_offset) &
				 UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
				UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
		}
	}

	return ret;
}

static int jpeg_v4_0_3_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;
	int i, j;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			unsigned int reg_offset = (j ? (0x40 * j - 0xc80) : 0);

			/* propagate a timeout instead of and-ing it into a
			 * zero-initialized accumulator, which always
			 * returned success */
			ret = SOC15_WAIT_ON_RREG_OFFSET(
				JPEG, GET_INST(JPEG, i),
				regUVD_JRBC0_UVD_JRBC_STATUS, reg_offset,
				UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
				UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
			if (ret)
				return ret;
		}
	}
	return 0;
}
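
/*
 * Gating clocks is only safe once every ring reports RB_JOB_DONE, so a gate
 * request fails with -EBUSY while any JRBC is still busy.
 */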
static int jpeg_v4_0_3_set_clockgating_state(void *handle,
					     enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (enable) {
			if (!jpeg_v4_0_3_is_idle(handle))
				return -EBUSY;
			jpeg_v4_0_3_enable_clock_gating(adev, i);
		} else {
			jpeg_v4_0_3_disable_clock_gating(adev, i);
		}
	}
	return 0;
}

static int jpeg_v4_0_3_set_powergating_state(void *handle,
					     enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (state == adev->jpeg.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = jpeg_v4_0_3_stop(adev);
	else
		ret = jpeg_v4_0_3_start(adev);

	if (!ret)
		adev->jpeg.cur_state = state;

	return ret;
}

static int jpeg_v4_0_3_set_interrupt_state(struct amdgpu_device *adev,
					   struct amdgpu_irq_src *source,
					   unsigned int type,
					   enum amdgpu_interrupt_state state)
{
	return 0;
}
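
/*
 * entry->node_id identifies the physical AID the interrupt came from;
 * translate it and match it against each instance's aid_id before picking
 * the ring from the source ID.
 */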
static int jpeg_v4_0_3_process_interrupt(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *source,
					 struct amdgpu_iv_entry *entry)
{
	uint32_t i, inst;

	i = node_id_to_phys_map[entry->node_id];
	DRM_DEV_DEBUG(adev->dev, "IH: JPEG TRAP\n");

	for (inst = 0; inst < adev->jpeg.num_jpeg_inst; ++inst)
		if (adev->jpeg.inst[inst].aid_id == i)
			break;

	if (inst >= adev->jpeg.num_jpeg_inst) {
		dev_WARN_ONCE(adev->dev, 1,
			      "Interrupt received for unknown JPEG instance %d",
			      entry->node_id);
		return 0;
	}

	switch (entry->src_id) {
	case VCN_4_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[0]);
		break;
	case VCN_4_0__SRCID__JPEG1_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[1]);
		break;
	case VCN_4_0__SRCID__JPEG2_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[2]);
		break;
	case VCN_4_0__SRCID__JPEG3_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[3]);
		break;
	case VCN_4_0__SRCID__JPEG4_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[4]);
		break;
	case VCN_4_0__SRCID__JPEG5_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[5]);
		break;
	case VCN_4_0__SRCID__JPEG6_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[6]);
		break;
	case VCN_4_0__SRCID__JPEG7_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[7]);
		break;
	default:
		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
			      entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = {
	.name = "jpeg_v4_0_3",
	.early_init = jpeg_v4_0_3_early_init,
	.late_init = NULL,
	.sw_init = jpeg_v4_0_3_sw_init,
	.sw_fini = jpeg_v4_0_3_sw_fini,
	.hw_init = jpeg_v4_0_3_hw_init,
	.hw_fini = jpeg_v4_0_3_hw_fini,
	.suspend = jpeg_v4_0_3_suspend,
	.resume = jpeg_v4_0_3_resume,
	.is_idle = jpeg_v4_0_3_is_idle,
	.wait_for_idle = jpeg_v4_0_3_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = jpeg_v4_0_3_set_clockgating_state,
	.set_powergating_state = jpeg_v4_0_3_set_powergating_state,
};

static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.get_rptr = jpeg_v4_0_3_dec_ring_get_rptr,
	.get_wptr = jpeg_v4_0_3_dec_ring_get_wptr,
	.set_wptr = jpeg_v4_0_3_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* jpeg_v4_0_3_dec_ring_emit_vm_flush */
		22 + 22 + /* jpeg_v4_0_3_dec_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* jpeg_v4_0_3_dec_ring_emit_ib */
	.emit_ib = jpeg_v4_0_3_dec_ring_emit_ib,
	.emit_fence = jpeg_v4_0_3_dec_ring_emit_fence,
	.emit_vm_flush = jpeg_v4_0_3_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
	.insert_nop = jpeg_v4_0_3_dec_ring_nop,
	.insert_start = jpeg_v4_0_3_dec_ring_insert_start,
	.insert_end = jpeg_v4_0_3_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_jpeg_ring_begin_use,
	.end_use = amdgpu_jpeg_ring_end_use,
	.emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
	.emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void jpeg_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i, j, jpeg_inst;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			adev->jpeg.inst[i].ring_dec[j].funcs = &jpeg_v4_0_3_dec_ring_vm_funcs;
			adev->jpeg.inst[i].ring_dec[j].me = i;
			adev->jpeg.inst[i].ring_dec[j].pipe = j;
		}
		jpeg_inst = GET_INST(JPEG, i);
		adev->jpeg.inst[i].aid_id =
			jpeg_inst / adev->jpeg.num_inst_per_aid;
	}
	DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs jpeg_v4_0_3_irq_funcs = {
	.set = jpeg_v4_0_3_set_interrupt_state,
	.process = jpeg_v4_0_3_process_interrupt,
};

static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i)
		adev->jpeg.inst->irq.num_types += adev->jpeg.num_jpeg_rings;
	adev->jpeg.inst->irq.funcs = &jpeg_v4_0_3_irq_funcs;
}

const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_JPEG,
	.major = 4,
	.minor = 0,
	.rev = 3,
	.funcs = &jpeg_v4_0_3_ip_funcs,
};
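
/*
 * Uncorrectable-error status register pairs, two per JPEG core
 * (JPEGnS/JPEGnD); only UE counting is wired up for this IP.
 */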
static const struct amdgpu_ras_err_status_reg_entry jpeg_v4_0_3_ue_reg_list[] = {
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG0S, regVCN_UE_ERR_STATUS_HI_JPEG0S),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG0S"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG0D, regVCN_UE_ERR_STATUS_HI_JPEG0D),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG0D"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG1S, regVCN_UE_ERR_STATUS_HI_JPEG1S),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG1S"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG1D, regVCN_UE_ERR_STATUS_HI_JPEG1D),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG1D"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG2S, regVCN_UE_ERR_STATUS_HI_JPEG2S),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG2S"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG2D, regVCN_UE_ERR_STATUS_HI_JPEG2D),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG2D"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG3S, regVCN_UE_ERR_STATUS_HI_JPEG3S),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG3S"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG3D, regVCN_UE_ERR_STATUS_HI_JPEG3D),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG3D"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG4S, regVCN_UE_ERR_STATUS_HI_JPEG4S),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG4S"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG4D, regVCN_UE_ERR_STATUS_HI_JPEG4D),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG4D"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG5S, regVCN_UE_ERR_STATUS_HI_JPEG5S),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG5S"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG5D, regVCN_UE_ERR_STATUS_HI_JPEG5D),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG5D"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG6S, regVCN_UE_ERR_STATUS_HI_JPEG6S),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG6S"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG6D, regVCN_UE_ERR_STATUS_HI_JPEG6D),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG6D"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG7S, regVCN_UE_ERR_STATUS_HI_JPEG7S),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG7S"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG7D, regVCN_UE_ERR_STATUS_HI_JPEG7D),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG7D"},
};

static void jpeg_v4_0_3_inst_query_ras_error_count(struct amdgpu_device *adev,
						   uint32_t jpeg_inst,
						   void *ras_err_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_err_status;

	/* jpeg v4_0_3 only supports uncorrectable errors */
	amdgpu_ras_inst_query_ras_error_count(adev,
					      jpeg_v4_0_3_ue_reg_list,
					      ARRAY_SIZE(jpeg_v4_0_3_ue_reg_list),
					      NULL, 0, GET_INST(VCN, jpeg_inst),
					      AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
					      &err_data->ue_count);
}

static void jpeg_v4_0_3_query_ras_error_count(struct amdgpu_device *adev,
					      void *ras_err_status)
{
	uint32_t i;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) {
		dev_warn(adev->dev, "JPEG RAS is not supported\n");
		return;
	}

	for (i = 0; i < adev->jpeg.num_jpeg_inst; i++)
		jpeg_v4_0_3_inst_query_ras_error_count(adev, i, ras_err_status);
}

static void jpeg_v4_0_3_inst_reset_ras_error_count(struct amdgpu_device *adev,
						   uint32_t jpeg_inst)
{
	amdgpu_ras_inst_reset_ras_error_count(adev,
					      jpeg_v4_0_3_ue_reg_list,
					      ARRAY_SIZE(jpeg_v4_0_3_ue_reg_list),
					      GET_INST(VCN, jpeg_inst));
}

static void jpeg_v4_0_3_reset_ras_error_count(struct amdgpu_device *adev)
{
	uint32_t i;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) {
		dev_warn(adev->dev, "JPEG RAS is not supported\n");
		return;
	}

	for (i = 0; i < adev->jpeg.num_jpeg_inst; i++)
		jpeg_v4_0_3_inst_reset_ras_error_count(adev, i);
}

static const struct amdgpu_ras_block_hw_ops jpeg_v4_0_3_ras_hw_ops = {
	.query_ras_error_count = jpeg_v4_0_3_query_ras_error_count,
	.reset_ras_error_count = jpeg_v4_0_3_reset_ras_error_count,
};

static struct amdgpu_jpeg_ras jpeg_v4_0_3_ras = {
	.ras_block = {
		.hw_ops = &jpeg_v4_0_3_ras_hw_ops,
	},
};

static void jpeg_v4_0_3_set_ras_funcs(struct amdgpu_device *adev)
{
	adev->jpeg.ras = &jpeg_v4_0_3_ras;
}