/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v4_0_3.h"

#include "vcn/vcn_4_0_3_offset.h"
#include "vcn/vcn_4_0_3_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"

enum jpeg_engine_status {
	UVD_PGFSM_STATUS__UVDJ_PWR_ON  = 0,
	UVD_PGFSM_STATUS__UVDJ_PWR_OFF = 2,
};

static void jpeg_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v4_0_3_set_powergating_state(void *handle,
				enum amd_powergating_state state);
static void jpeg_v4_0_3_set_ras_funcs(struct amdgpu_device *adev);

static int amdgpu_ih_srcid_jpeg[] = {
	VCN_4_0__SRCID__JPEG_DECODE,
	VCN_4_0__SRCID__JPEG1_DECODE,
	VCN_4_0__SRCID__JPEG2_DECODE,
	VCN_4_0__SRCID__JPEG3_DECODE,
	VCN_4_0__SRCID__JPEG4_DECODE,
	VCN_4_0__SRCID__JPEG5_DECODE,
	VCN_4_0__SRCID__JPEG6_DECODE,
	VCN_4_0__SRCID__JPEG7_DECODE
};

/**
 * jpeg_v4_0_3_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int jpeg_v4_0_3_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS;

	jpeg_v4_0_3_set_dec_ring_funcs(adev);
	jpeg_v4_0_3_set_irq_funcs(adev);
	jpeg_v4_0_3_set_ras_funcs(adev);

	return 0;
}

/**
 * jpeg_v4_0_3_sw_init - sw init for JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int jpeg_v4_0_3_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r, jpeg_inst;

	for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
		/* JPEG TRAP */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
				amdgpu_ih_srcid_jpeg[j], &adev->jpeg.inst->irq);
		if (r)
			return r;
	}

	r = amdgpu_jpeg_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		jpeg_inst = GET_INST(JPEG, i);

		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			ring = &adev->jpeg.inst[i].ring_dec[j];
			ring->use_doorbell = true;
			ring->vm_hub = AMDGPU_MMHUB0(adev->jpeg.inst[i].aid_id);
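			/*
			 * Doorbell layout, as encoded below: on bare metal
			 * each JPEG instance owns a block of 9 doorbells
			 * after the VCN ring-pair index, with the 8 decode
			 * rings at slots 1-8; under SR-IOV each instance
			 * owns a 32-doorbell block, with rings 0-3 at
			 * slots 4-7 and rings 4-7 at slots 12-15.
			 */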
			if (!amdgpu_sriov_vf(adev)) {
				ring->doorbell_index =
					(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					1 + j + 9 * jpeg_inst;
			} else {
				if (j < 4)
					ring->doorbell_index =
						(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
						4 + j + 32 * jpeg_inst;
				else
					ring->doorbell_index =
						(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
						8 + j + 32 * jpeg_inst;
			}
			sprintf(ring->name, "jpeg_dec_%d.%d", adev->jpeg.inst[i].aid_id, j);
			r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
						AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;

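			/*
			 * The JRBC scratch register serves as the "pitch"
			 * register used by the common JPEG ring test.
			 * Per-ring JRBC register blocks are spaced 0x40
			 * apart; rings 1-7 sit at the fixed negative
			 * displacement (0x40 * j - 0xc80) from ring 0.
			 */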
			adev->jpeg.internal.jpeg_pitch[j] =
				regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET;
			adev->jpeg.inst[i].external.jpeg_pitch[j] =
				SOC15_REG_OFFSET1(
					JPEG, jpeg_inst,
					regUVD_JRBC0_UVD_JRBC_SCRATCH0,
					(j ? (0x40 * j - 0xc80) : 0));
		}
	}

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) {
		r = amdgpu_jpeg_ras_sw_init(adev);
		if (r) {
			dev_err(adev->dev, "Failed to initialize jpeg ras block!\n");
			return r;
		}
	}

	return 0;
}

/**
 * jpeg_v4_0_3_sw_fini - sw fini for JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * JPEG suspend and free up sw allocation
 */
static int jpeg_v4_0_3_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_jpeg_suspend(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_sw_fini(adev);

	return r;
}

/**
 * jpeg_v4_0_3_hw_init - start and test JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Set up the doorbells and test the decode rings of the JPEG block
 */
static int jpeg_v4_0_3_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r, jpeg_inst;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		jpeg_inst = GET_INST(JPEG, i);

		ring = adev->jpeg.inst[i].ring_dec;

		if (ring->use_doorbell)
			adev->nbio.funcs->vcn_doorbell_range(
				adev, ring->use_doorbell,
				(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					9 * jpeg_inst,
				adev->jpeg.inst[i].aid_id);

		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			ring = &adev->jpeg.inst[i].ring_dec[j];
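			/*
			 * Enable the per-pipe doorbell: pipe 0 uses
			 * regVCN_JPEG_DB_CTRL itself, while the control
			 * registers for pipes 1-7 sit at the (pipe - 0x15)
			 * dword offset from it in this register map.
			 */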
			if (ring->use_doorbell)
				WREG32_SOC15_OFFSET(
					VCN, GET_INST(VCN, i),
					regVCN_JPEG_DB_CTRL,
					(ring->pipe ? (ring->pipe - 0x15) : 0),
					ring->doorbell_index
							<< VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
						VCN_JPEG_DB_CTRL__EN_MASK);
			r = amdgpu_ring_test_helper(ring);
			if (r)
				return r;
		}
	}
	DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully.\n");

	return 0;
}

/**
 * jpeg_v4_0_3_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the JPEG block, mark ring as not ready any more
 */
static int jpeg_v4_0_3_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	cancel_delayed_work_sync(&adev->jpeg.idle_work);

	if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
		ret = jpeg_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);

	return ret;
}

/**
 * jpeg_v4_0_3_suspend - suspend JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend JPEG block
 */
static int jpeg_v4_0_3_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = jpeg_v4_0_3_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_suspend(adev);

	return r;
}

/**
 * jpeg_v4_0_3_resume - resume JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init JPEG block
 */
static int jpeg_v4_0_3_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	r = jpeg_v4_0_3_hw_init(adev);

	return r;
}

static void jpeg_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst_idx)
{
	int i, jpeg_inst;
	uint32_t data;

	jpeg_inst = GET_INST(JPEG, inst_idx);
	data = RREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data &= (~(JPEG_CGC_CTRL__JPEG0_DEC_MODE_MASK << 1));
	} else {
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	}

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JMCIF_MASK | JPEG_CGC_GATE__JRBBM_MASK);
	for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i)
		data &= ~(JPEG_CGC_GATE__JPEG0_DEC_MASK << i);
	WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_GATE, data);
}

static void jpeg_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_idx)
{
	int i, jpeg_inst;
	uint32_t data;

	jpeg_inst = GET_INST(JPEG, inst_idx);
	data = RREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data |= (JPEG_CGC_CTRL__JPEG0_DEC_MODE_MASK << 1);
	} else {
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	}

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JMCIF_MASK | JPEG_CGC_GATE__JRBBM_MASK);
	for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i)
		data |= (JPEG_CGC_GATE__JPEG0_DEC_MASK << i);
	WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_GATE, data);
}

/**
 * jpeg_v4_0_3_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block
 */
static int jpeg_v4_0_3_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i, j, jpeg_inst;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		jpeg_inst = GET_INST(JPEG, i);

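		/* power up the JPEG island and wait for the PGFSM to report ON */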
		WREG32_SOC15(JPEG, jpeg_inst, regUVD_PGFSM_CONFIG,
			     1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(
			JPEG, jpeg_inst, regUVD_PGFSM_STATUS,
			UVD_PGFSM_STATUS__UVDJ_PWR_ON
				<< UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT,
			UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);

		/* disable anti hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst,
					  regUVD_JPEG_POWER_STATUS),
			 0, ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

		/* JPEG disable CGC */
		jpeg_v4_0_3_disable_clock_gating(adev, i);

		/* MJPEG global tiling registers */
		WREG32_SOC15(JPEG, jpeg_inst, regJPEG_DEC_GFX8_ADDR_CONFIG,
			     adev->gfx.config.gb_addr_config);
		WREG32_SOC15(JPEG, jpeg_inst, regJPEG_DEC_GFX10_ADDR_CONFIG,
			     adev->gfx.config.gb_addr_config);

		/* enable JMI channel */
		WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL), 0,
			 ~UVD_JMI_CNTL__SOFT_RESET_MASK);

		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
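			/* per-ring JRBC blocks: 0x40 apart, rings 1-7 at (0x40 * j - 0xc80) */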
			unsigned int reg_offset = (j ? (0x40 * j - 0xc80) : 0);

			ring = &adev->jpeg.inst[i].ring_dec[j];

			/* enable System Interrupt for JRBC */
			WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst,
						  regJPEG_SYS_INT_EN),
				 JPEG_SYS_INT_EN__DJRBC0_MASK << j,
				 ~(JPEG_SYS_INT_EN__DJRBC0_MASK << j));

			WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
					    regUVD_JMI0_UVD_LMI_JRBC_RB_VMID,
					    reg_offset, 0);
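			/*
			 * Program the ring with fetch stalled: bit 0
			 * (RB_NO_FETCH on earlier JRBC generations) and
			 * bit 1 (RB_RPTR_WR_EN) stay set while the base,
			 * rptr and wptr registers are written.
			 */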
			WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
					    regUVD_JRBC0_UVD_JRBC_RB_CNTL,
					    reg_offset,
					    (0x00000001L | 0x00000002L));
			WREG32_SOC15_OFFSET(
				JPEG, jpeg_inst,
				regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW,
				reg_offset, lower_32_bits(ring->gpu_addr));
			WREG32_SOC15_OFFSET(
				JPEG, jpeg_inst,
				regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
				reg_offset, upper_32_bits(ring->gpu_addr));
			WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
					    regUVD_JRBC0_UVD_JRBC_RB_RPTR,
					    reg_offset, 0);
			WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
					    regUVD_JRBC0_UVD_JRBC_RB_WPTR,
					    reg_offset, 0);
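			/* release the fetch stall; keep rptr write enable set */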
			WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
					    regUVD_JRBC0_UVD_JRBC_RB_CNTL,
					    reg_offset, 0x00000002L);
			WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
					    regUVD_JRBC0_UVD_JRBC_RB_SIZE,
					    reg_offset, ring->ring_size / 4);
			ring->wptr = RREG32_SOC15_OFFSET(
				JPEG, jpeg_inst, regUVD_JRBC0_UVD_JRBC_RB_WPTR,
				reg_offset);
		}
	}

	return 0;
}

/**
 * jpeg_v4_0_3_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the JPEG block
 */
static int jpeg_v4_0_3_stop(struct amdgpu_device *adev)
{
	int i, jpeg_inst;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		jpeg_inst = GET_INST(JPEG, i);
		/* reset JMI */
		WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL),
			 UVD_JMI_CNTL__SOFT_RESET_MASK,
			 ~UVD_JMI_CNTL__SOFT_RESET_MASK);

		jpeg_v4_0_3_enable_clock_gating(adev, i);

		/* enable anti hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst,
					  regUVD_JPEG_POWER_STATUS),
			 UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
			 ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

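		/* power the island down and wait for the PGFSM to report OFF */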
		WREG32_SOC15(JPEG, jpeg_inst, regUVD_PGFSM_CONFIG,
			     2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(
			JPEG, jpeg_inst, regUVD_PGFSM_STATUS,
			UVD_PGFSM_STATUS__UVDJ_PWR_OFF
				<< UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT,
			UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
	}

	return 0;
}

/**
 * jpeg_v4_0_3_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t jpeg_v4_0_3_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15_OFFSET(
		JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC0_UVD_JRBC_RB_RPTR,
		ring->pipe ? (0x40 * ring->pipe - 0xc80) : 0);
}

/**
 * jpeg_v4_0_3_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t jpeg_v4_0_3_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15_OFFSET(
			JPEG, GET_INST(JPEG, ring->me),
			regUVD_JRBC0_UVD_JRBC_RB_WPTR,
			ring->pipe ? (0x40 * ring->pipe - 0xc80) : 0);
}

/**
 * jpeg_v4_0_3_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void jpeg_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me),
				    regUVD_JRBC0_UVD_JRBC_RB_WPTR,
				    (ring->pipe ? (0x40 * ring->pipe - 0xc80) :
						  0),
				    lower_32_bits(ring->wptr));
	}
}

/**
 * jpeg_v4_0_3_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x62a04); /* PCTL0_MMHUB_DEEPSLEEP_IB */

	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x80004000);
}

/**
 * jpeg_v4_0_3_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
static void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x62a04);

	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x00004000);
}

/**
 * jpeg_v4_0_3_dec_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned int flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

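	/*
	 * Hand the fence value and target address to the engine through
	 * the GPCOM data registers, then issue GPCOM command 0x8 to write
	 * the fence and raise the trap interrupt.
	 */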
	amdgpu_ring_write(ring, PACKETJ(regUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(addr));

	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(addr));

	amdgpu_ring_write(ring, PACKETJ(regUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
		0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
	amdgpu_ring_write(ring, 0);

	if (ring->adev->jpeg.inst[ring->me].aid_id) {
		amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_MCM_ADDR_INTERNAL_OFFSET,
			0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE0));
		amdgpu_ring_write(ring, 0x4);
	} else {
		amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
		amdgpu_ring_write(ring, 0);
	}

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x3fbc);

	if (ring->adev->jpeg.inst[ring->me].aid_id) {
		amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_MCM_ADDR_INTERNAL_OFFSET,
			0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE0));
		amdgpu_ring_write(ring, 0x0);
	} else {
		amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
		amdgpu_ring_write(ring, 0);
	}

	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x1);

	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
	amdgpu_ring_write(ring, 0);
}

/**
 * jpeg_v4_0_3_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer.
 */
static void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
				struct amdgpu_job *job,
				struct amdgpu_ib *ib,
				uint32_t flags)
{
	unsigned int vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, (vmid | (vmid << 4)));

	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, (vmid | (vmid << 4)));

	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));

	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_IB_SIZE_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, ib->length_dw);

	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));

	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));

	amdgpu_ring_write(ring, PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x2);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_STATUS_INTERNAL_OFFSET,
		0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
	amdgpu_ring_write(ring, 0x2);
}

static void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
				uint32_t val, uint32_t mask)
{
	uint32_t reg_offset = (reg << 2);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, val);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
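	/*
	 * Registers whose byte offset falls in the 0x10000-0x105ff window
	 * are encoded directly in the packet; anything else goes through
	 * the JRBC external register write address.
	 */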
	if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring,
			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
	} else {
		amdgpu_ring_write(ring, reg_offset);
		amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
			0, 0, PACKETJ_TYPE3));
	}
	amdgpu_ring_write(ring, mask);
}

static void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
				unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	jpeg_v4_0_3_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}

static void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
	uint32_t reg_offset = (reg << 2);

	amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
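	/* same direct-vs-indirect addressing rule as emit_reg_wait above */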
	if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring,
			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
	} else {
		amdgpu_ring_write(ring, reg_offset);
		amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
			0, 0, PACKETJ_TYPE0));
	}
	amdgpu_ring_write(ring, val);
}

static void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
		amdgpu_ring_write(ring, 0);
	}
}

static bool jpeg_v4_0_3_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool ret = true;
	int i, j;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			unsigned int reg_offset = (j ? (0x40 * j - 0xc80) : 0);

			ret &= ((RREG32_SOC15_OFFSET(
					 JPEG, GET_INST(JPEG, i),
					 regUVD_JRBC0_UVD_JRBC_STATUS,
					 reg_offset) &
				 UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
				UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
		}
	}

	return ret;
}

static int jpeg_v4_0_3_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;
	int i, j;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			unsigned int reg_offset = (j ? (0x40 * j - 0xc80) : 0);

			ret = SOC15_WAIT_ON_RREG_OFFSET(
				JPEG, GET_INST(JPEG, i),
				regUVD_JRBC0_UVD_JRBC_STATUS, reg_offset,
				UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
				UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
			if (ret)
				return ret;
		}
	}
	return 0;
}

static int jpeg_v4_0_3_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = state == AMD_CG_STATE_GATE;
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (enable) {
			if (!jpeg_v4_0_3_is_idle(handle))
				return -EBUSY;
			jpeg_v4_0_3_enable_clock_gating(adev, i);
		} else {
			jpeg_v4_0_3_disable_clock_gating(adev, i);
		}
	}
	return 0;
}

static int jpeg_v4_0_3_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (state == adev->jpeg.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = jpeg_v4_0_3_stop(adev);
	else
		ret = jpeg_v4_0_3_start(adev);

	if (!ret)
		adev->jpeg.cur_state = state;

	return ret;
}

static int jpeg_v4_0_3_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned int type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int jpeg_v4_0_3_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t i, inst;

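	/* map the IH node id to an AID, then find the JPEG instance on it */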
	i = node_id_to_phys_map[entry->node_id];
	DRM_DEV_DEBUG(adev->dev, "IH: JPEG TRAP\n");

	for (inst = 0; inst < adev->jpeg.num_jpeg_inst; ++inst)
		if (adev->jpeg.inst[inst].aid_id == i)
			break;

	if (inst >= adev->jpeg.num_jpeg_inst) {
		dev_WARN_ONCE(adev->dev, 1,
			      "Interrupt received for unknown JPEG instance %d",
			      entry->node_id);
		return 0;
	}

	switch (entry->src_id) {
	case VCN_4_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[0]);
		break;
	case VCN_4_0__SRCID__JPEG1_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[1]);
		break;
	case VCN_4_0__SRCID__JPEG2_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[2]);
		break;
	case VCN_4_0__SRCID__JPEG3_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[3]);
		break;
	case VCN_4_0__SRCID__JPEG4_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[4]);
		break;
	case VCN_4_0__SRCID__JPEG5_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[5]);
		break;
	case VCN_4_0__SRCID__JPEG6_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[6]);
		break;
	case VCN_4_0__SRCID__JPEG7_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[7]);
		break;
	default:
		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = {
	.name = "jpeg_v4_0_3",
	.early_init = jpeg_v4_0_3_early_init,
	.late_init = NULL,
	.sw_init = jpeg_v4_0_3_sw_init,
	.sw_fini = jpeg_v4_0_3_sw_fini,
	.hw_init = jpeg_v4_0_3_hw_init,
	.hw_fini = jpeg_v4_0_3_hw_fini,
	.suspend = jpeg_v4_0_3_suspend,
	.resume = jpeg_v4_0_3_resume,
	.is_idle = jpeg_v4_0_3_is_idle,
	.wait_for_idle = jpeg_v4_0_3_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = jpeg_v4_0_3_set_clockgating_state,
	.set_powergating_state = jpeg_v4_0_3_set_powergating_state,
};

static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.get_rptr = jpeg_v4_0_3_dec_ring_get_rptr,
	.get_wptr = jpeg_v4_0_3_dec_ring_get_wptr,
	.set_wptr = jpeg_v4_0_3_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* jpeg_v4_0_3_dec_ring_emit_vm_flush */
		22 + 22 + /* jpeg_v4_0_3_dec_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* jpeg_v4_0_3_dec_ring_emit_ib */
	.emit_ib = jpeg_v4_0_3_dec_ring_emit_ib,
	.emit_fence = jpeg_v4_0_3_dec_ring_emit_fence,
	.emit_vm_flush = jpeg_v4_0_3_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
	.insert_nop = jpeg_v4_0_3_dec_ring_nop,
	.insert_start = jpeg_v4_0_3_dec_ring_insert_start,
	.insert_end = jpeg_v4_0_3_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_jpeg_ring_begin_use,
	.end_use = amdgpu_jpeg_ring_end_use,
	.emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
	.emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void jpeg_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i, j, jpeg_inst;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			adev->jpeg.inst[i].ring_dec[j].funcs = &jpeg_v4_0_3_dec_ring_vm_funcs;
			adev->jpeg.inst[i].ring_dec[j].me = i;
			adev->jpeg.inst[i].ring_dec[j].pipe = j;
		}
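		/* logical instance i maps to physical instance jpeg_inst;
		 * instances are grouped num_inst_per_aid per AID
		 */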
		jpeg_inst = GET_INST(JPEG, i);
		adev->jpeg.inst[i].aid_id =
			jpeg_inst / adev->jpeg.num_inst_per_aid;
	}
	DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs jpeg_v4_0_3_irq_funcs = {
	.set = jpeg_v4_0_3_set_interrupt_state,
	.process = jpeg_v4_0_3_process_interrupt,
};

static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i)
		adev->jpeg.inst->irq.num_types += adev->jpeg.num_jpeg_rings;

	adev->jpeg.inst->irq.funcs = &jpeg_v4_0_3_irq_funcs;
}

const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_JPEG,
	.major = 4,
	.minor = 0,
	.rev = 3,
	.funcs = &jpeg_v4_0_3_ip_funcs,
};

static const struct amdgpu_ras_err_status_reg_entry jpeg_v4_0_3_ue_reg_list[] = {
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG0S, regVCN_UE_ERR_STATUS_HI_JPEG0S),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG0S"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG0D, regVCN_UE_ERR_STATUS_HI_JPEG0D),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG0D"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG1S, regVCN_UE_ERR_STATUS_HI_JPEG1S),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG1S"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG1D, regVCN_UE_ERR_STATUS_HI_JPEG1D),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG1D"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG2S, regVCN_UE_ERR_STATUS_HI_JPEG2S),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG2S"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG2D, regVCN_UE_ERR_STATUS_HI_JPEG2D),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG2D"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG3S, regVCN_UE_ERR_STATUS_HI_JPEG3S),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG3S"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG3D, regVCN_UE_ERR_STATUS_HI_JPEG3D),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG3D"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG4S, regVCN_UE_ERR_STATUS_HI_JPEG4S),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG4S"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG4D, regVCN_UE_ERR_STATUS_HI_JPEG4D),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG4D"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG5S, regVCN_UE_ERR_STATUS_HI_JPEG5S),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG5S"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG5D, regVCN_UE_ERR_STATUS_HI_JPEG5D),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG5D"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG6S, regVCN_UE_ERR_STATUS_HI_JPEG6S),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG6S"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG6D, regVCN_UE_ERR_STATUS_HI_JPEG6D),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG6D"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG7S, regVCN_UE_ERR_STATUS_HI_JPEG7S),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG7S"},
	{AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG7D, regVCN_UE_ERR_STATUS_HI_JPEG7D),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG7D"},
};

static void jpeg_v4_0_3_inst_query_ras_error_count(struct amdgpu_device *adev,
						   uint32_t jpeg_inst,
						   void *ras_err_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_err_status;

	/* jpeg v4_0_3 only supports uncorrectable errors */
	amdgpu_ras_inst_query_ras_error_count(adev,
			jpeg_v4_0_3_ue_reg_list,
			ARRAY_SIZE(jpeg_v4_0_3_ue_reg_list),
			NULL, 0, GET_INST(VCN, jpeg_inst),
			AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
			&err_data->ue_count);
}

static void jpeg_v4_0_3_query_ras_error_count(struct amdgpu_device *adev,
					      void *ras_err_status)
{
	uint32_t i;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) {
		dev_warn(adev->dev, "JPEG RAS is not supported\n");
		return;
	}

	for (i = 0; i < adev->jpeg.num_jpeg_inst; i++)
		jpeg_v4_0_3_inst_query_ras_error_count(adev, i, ras_err_status);
}

static void jpeg_v4_0_3_inst_reset_ras_error_count(struct amdgpu_device *adev,
						   uint32_t jpeg_inst)
{
	amdgpu_ras_inst_reset_ras_error_count(adev,
			jpeg_v4_0_3_ue_reg_list,
			ARRAY_SIZE(jpeg_v4_0_3_ue_reg_list),
			GET_INST(VCN, jpeg_inst));
}

static void jpeg_v4_0_3_reset_ras_error_count(struct amdgpu_device *adev)
{
	uint32_t i;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) {
		dev_warn(adev->dev, "JPEG RAS is not supported\n");
		return;
	}

	for (i = 0; i < adev->jpeg.num_jpeg_inst; i++)
		jpeg_v4_0_3_inst_reset_ras_error_count(adev, i);
}

static const struct amdgpu_ras_block_hw_ops jpeg_v4_0_3_ras_hw_ops = {
	.query_ras_error_count = jpeg_v4_0_3_query_ras_error_count,
	.reset_ras_error_count = jpeg_v4_0_3_reset_ras_error_count,
};

static struct amdgpu_jpeg_ras jpeg_v4_0_3_ras = {
	.ras_block = {
		.hw_ops = &jpeg_v4_0_3_ras_hw_ops,
	},
};

static void jpeg_v4_0_3_set_ras_funcs(struct amdgpu_device *adev)
{
	adev->jpeg.ras = &jpeg_v4_0_3_ras;
}